From: Vasudev Kamath Date: Wed, 8 Aug 2018 15:13:41 +0000 (+0530) Subject: New upstream version 0.29.0 X-Git-Tag: archive/raspbian/0.35.0-2+rpi1~3^2^2^2^2^2^2^2~22^2 X-Git-Url: https://dgit.raspbian.org/%22http://www.example.com/cgi/%22/%22http:/www.example.com/cgi/%22?a=commitdiff_plain;h=836c94aba88b918810431be86eb23709b733162b;p=cargo.git New upstream version 0.29.0 --- 836c94aba88b918810431be86eb23709b733162b diff --cc vendor/atty-0.2.11/.cargo-checksum.json index 000000000,000000000..d828fa118 new file mode 100644 --- /dev/null +++ b/vendor/atty-0.2.11/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652"} diff --cc vendor/atty-0.2.11/.travis.yml index 000000000,000000000..a628724c9 new file mode 100644 --- /dev/null +++ b/vendor/atty-0.2.11/.travis.yml @@@ -1,0 -1,0 +1,73 @@@ ++sudo: false ++language: rust ++matrix: ++ fast_finish: true ++ include: ++ - rust: nightly ++ - rust: nightly ++ os: osx ++ - rust: beta ++ - rust: beta ++ os: osx ++ - rust: stable ++ - rust: stable ++ os: osx ++ allow_failures: ++ - rust: nightly ++ ++before_cache: ++ # Travis can't cache files that are not readable by "others" ++ - chmod -R a+r $HOME/.cargo ++ ++before_install: ++ # install kcov ++ - > ++ if [ ! -d "$HOME/.kcov/bin" ]; then ++ wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz && ++ tar xzf master.tar.gz && mkdir kcov-master/build && cd kcov-master/build && ++ cmake -DCMAKE_INSTALL_PREFIX:PATH=$HOME/.kcov .. && make && make install && cd ../.. ++ fi ++ - export PATH=$HOME/.kcov/bin:$PATH ++ ++script: ++ - cargo build ++ ++cache: ++ cargo: true ++ apt: true ++ directories: ++ - target/debug/deps ++ - target/debug/build ++ ++addons: ++ apt: ++ packages: ++ - libcurl4-openssl-dev ++ - libelf-dev ++ - libdw-dev ++ - binutils-dev ++ - libiberty-dev ++ ++after_success: ++ - '[ $TRAVIS_RUST_VERSION = stable ] && ++ [ $TRAVIS_BRANCH = master ] && ++ [ $TRAVIS_PULL_REQUEST = false ] && ++ (ls target/debug && ++ RUSTFLAGS="-C link-dead-code" cargo test --no-run && ++ for file in target/debug/atty-*; do ++ if [[ "${file: -2}" != ".d" ]]; then ++ mkdir -p "target/cov/$(basename $file)"; ++ kcov --exclude-pattern=/.cargo,/usr/lib --verify "target/cov/$(basename $file)" "$file"; ++ fi; ++ done && ++ kcov --coveralls-id=$COVERALLS_REPO_TOKEN --merge target/cov target/cov/* && ++ echo "covered") || true' ++ - '[ $TRAVIS_RUST_VERSION = stable ] && ++ [ $TRAVIS_BRANCH = master ] && ++ [ $TRAVIS_PULL_REQUEST = false ] ++ && cargo doc --no-deps && ++ echo "" > target/doc/index.html && ++ pip install --user ghp-import && ++ /home/travis/.local/bin/ghp-import -n target/doc && ++ git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages && ++ echo "documented"' diff --cc vendor/atty-0.2.11/CHANGELOG.md index 000000000,000000000..0b5c0eab5 new file mode 100644 --- /dev/null +++ b/vendor/atty-0.2.11/CHANGELOG.md @@@ -1,0 -1,0 +1,61 @@@ ++# 0.2.11 ++ ++* fix msys detection with `winapi@0.3.5` [#28](https://github.com/softprops/atty/pull/28) ++ ++# 0.2.10 ++ ++* fix wasm regression [#27](https://github.com/softprops/atty/pull/27) ++ ++# 0.2.9 ++ ++* Fix fix pty detection [#25](https://github.com/softprops/atty/pull/25) ++ ++# 0.2.8 ++ ++* Fix an inverted condition on MinGW [#22](https://github.com/softprops/atty/pull/22) ++ ++# 0.2.7 ++ ++* Change `||` to `&&` for whether MSYS is a tty [#24](https://github.com/softprops/atty/pull/24/) ++ ++# 0.2.6 ++ ++* updated winapi dependency to 
[0.3](https://retep998.github.io/blog/winapi-0.3/) [#18](https://github.com/softprops/atty/pull/18) ++ ++# 0.2.5 ++ ++* added support for Wasm compile targets [#17](https://github.com/softprops/atty/pull/17) ++ ++# 0.2.4 ++ ++* added support for Wasm compile targets [#17](https://github.com/softprops/atty/pull/17) ++ ++# 0.2.3 ++ ++* added support for Redox OS [#14](https://github.com/softprops/atty/pull/14) ++ ++# 0.2.2 ++ ++* use target specific dependencies [#11](https://github.com/softprops/atty/pull/11) ++* Add tty detection for MSYS terminals [#12](https://github.com/softprops/atty/pull/12) ++ ++# 0.2.1 ++ ++* fix windows bug ++ ++# 0.2.0 ++ ++* support for various stream types ++ ++# 0.1.2 ++ ++* windows support (with automated testing) ++* automated code coverage ++ ++# 0.1.1 ++ ++* bumped libc dep from `0.1` to `0.2` ++ ++# 0.1.0 ++ ++* initial release diff --cc vendor/atty-0.2.11/Cargo.toml index 000000000,000000000..0d02b6af2 new file mode 100644 --- /dev/null +++ b/vendor/atty-0.2.11/Cargo.toml @@@ -1,0 -1,0 +1,33 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "atty" ++version = "0.2.11" ++authors = ["softprops "] ++description = "A simple interface for querying atty" ++homepage = "https://github.com/softprops/atty" ++documentation = "http://softprops.github.io/atty" ++readme = "README.md" ++keywords = ["terminal", "tty"] ++license = "MIT" ++repository = "https://github.com/softprops/atty" ++[target."cfg(target_os = \"redox\")".dependencies.termion] ++version = "1.5" ++[target."cfg(unix)".dependencies.libc] ++version = "0.2" ++default-features = false ++[target."cfg(windows)".dependencies.winapi] ++version = "0.3" ++features = ["consoleapi", "processenv", "minwinbase", "minwindef", "winbase"] ++[badges.travis-ci] ++repository = "softprops/atty" diff --cc vendor/atty-0.2.11/LICENSE index 000000000,000000000..d1f01c829 new file mode 100644 --- /dev/null +++ b/vendor/atty-0.2.11/LICENSE @@@ -1,0 -1,0 +1,20 @@@ ++Copyright (c) 2015-2017 Doug Tangren ++ ++Permission is hereby granted, free of charge, to any person obtaining ++a copy of this software and associated documentation files (the ++"Software"), to deal in the Software without restriction, including ++without limitation the rights to use, copy, modify, merge, publish, ++distribute, sublicense, and/or sell copies of the Software, and to ++permit persons to whom the Software is furnished to do so, subject to ++the following conditions: ++ ++The above copyright notice and this permission notice shall be ++included in all copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
++LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --cc vendor/atty-0.2.11/README.md
index 000000000,000000000..5ce32d1d7
new file mode 100644
--- /dev/null
+++ b/vendor/atty-0.2.11/README.md
@@@ -1,0 -1,0 +1,76 @@@
++# atty
++
++[![Build Status](https://travis-ci.org/softprops/atty.svg?branch=master)](https://travis-ci.org/softprops/atty) [![Build status](https://ci.appveyor.com/api/projects/status/geggrsnsjsuse8cv?svg=true)](https://ci.appveyor.com/project/softprops/atty) [![Coverage Status](https://coveralls.io/repos/softprops/atty/badge.svg?branch=master&service=github)](https://coveralls.io/github/softprops/atty?branch=master) [![crates.io](https://img.shields.io/crates/v/atty.svg)](https://crates.io/crates/atty) [![Released API docs](https://docs.rs/atty/badge.svg)](http://docs.rs/atty) [![Master API docs](https://img.shields.io/badge/docs-master-green.svg)](https://softprops.github.io/atty)
++
++> are you or are you not a tty?
++
++
++## install
++
++Add the following to your `Cargo.toml`
++
++```toml
++[dependencies]
++atty = "0.2"
++```
++
++## usage
++
++```rust
++extern crate atty;
++
++use atty::Stream;
++
++fn main() {
++  if atty::is(Stream::Stdout) {
++    println!("I'm a terminal");
++  } else {
++    println!("I'm not");
++  }
++}
++```
++
++## testing
++
++This library has been unit tested on both unix and windows platforms (via appveyor).
++
++A simple example program is provided in this repo to test various tty's. By default it prints
++
++```bash
++$ cargo run --example atty
++stdout? true
++stderr? true
++stdin? true
++```
++
++To test stdin, pipe some text to the program
++
++```bash
++$ echo "test" | cargo run --example atty
++stdout? true
++stderr? true
++stdin? false
++```
++
++To test stdout, pipe the program to something
++
++```bash
++$ cargo run --example atty | grep std
++stdout? false
++stderr? true
++stdin? true
++```
++
++To test stderr, pipe the program to something redirecting stderr
++
++```bash
++$ cargo run --example atty 2>&1 | grep std
++stdout? false
++stderr? false
++stdin? true
++```
++
++Doug Tangren (softprops) 2015-2017
diff --cc vendor/atty-0.2.11/appveyor.yml
index 000000000,000000000..d7fb12794
new file mode 100644
--- /dev/null
+++ b/vendor/atty-0.2.11/appveyor.yml
@@@ -1,0 -1,0 +1,16 @@@
++environment:
++  matrix:
++  - TARGET: nightly-x86_64-pc-windows-msvc
++  - TARGET: nightly-i686-pc-windows-msvc
++  - TARGET: nightly-x86_64-pc-windows-gnu
++  - TARGET: nightly-i686-pc-windows-gnu
++install:
++  - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:TARGET}.exe" -FileName "rust-install.exe"
++  - ps: .\rust-install.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null
++  - ps: $env:PATH="$env:PATH;C:\rust\bin"
++  - call "%VCVARS%" || ver>nul
++  - rustc -vV
++  - cargo -vV
++build: false
++test_script:
++  - cargo build
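Beyond the example binary vendored below, the crate's typical role is gating human-oriented formatting on an interactivity check. A minimal sketch of that pattern, assuming the public `atty::is`/`Stream` API shown above (the escape codes are illustrative only):

```rust
extern crate atty;

use atty::Stream;

fn main() {
    // Emit ANSI color only when stdout is an interactive terminal;
    // fall back to plain text when the output is piped or redirected.
    if atty::is(Stream::Stdout) {
        println!("\x1b[32mready\x1b[0m");
    } else {
        println!("ready");
    }
}
```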
{}", is(Stream::Stdin)); ++} diff --cc vendor/atty-0.2.11/rustfmt.toml index 000000000,000000000..d83987dca new file mode 100644 --- /dev/null +++ b/vendor/atty-0.2.11/rustfmt.toml @@@ -1,0 -1,0 +1,10 @@@ ++# keep imports tidy ++reorder_imported_names = true ++reorder_imports = true ++reorder_imports_in_group = true ++# there is no try! ++use_try_shorthand = true ++# don't create rustfmt artifacts ++write_mode = "Replace" ++# reduce wide load ++max_width = 80 diff --cc vendor/atty-0.2.11/src/lib.rs index 000000000,000000000..6bffeadac new file mode 100644 --- /dev/null +++ b/vendor/atty-0.2.11/src/lib.rs @@@ -1,0 -1,0 +1,210 @@@ ++//! atty is a simple utility that answers one question ++//! > is this a tty? ++//! ++//! usage is just as simple ++//! ++//! ``` ++//! if atty::is(atty::Stream::Stdout) { ++//! println!("i'm a tty") ++//! } ++//! ``` ++//! ++//! ``` ++//! if atty::isnt(atty::Stream::Stdout) { ++//! println!("i'm not a tty") ++//! } ++//! ``` ++ ++#![cfg_attr(unix, no_std)] ++ ++#[cfg(unix)] ++extern crate libc; ++#[cfg(windows)] ++extern crate winapi; ++#[cfg(target_os = "redox")] ++extern crate termion; ++ ++#[cfg(windows)] ++use winapi::shared::minwindef::DWORD; ++#[cfg(windows)] ++use winapi::shared::ntdef::WCHAR; ++ ++/// possible stream sources ++#[derive(Clone, Copy, Debug)] ++pub enum Stream { ++ Stdout, ++ Stderr, ++ Stdin, ++} ++ ++/// returns true if this is a tty ++#[cfg(all(unix, not(target_arch = "wasm32")))] ++pub fn is(stream: Stream) -> bool { ++ extern crate libc; ++ ++ let fd = match stream { ++ Stream::Stdout => libc::STDOUT_FILENO, ++ Stream::Stderr => libc::STDERR_FILENO, ++ Stream::Stdin => libc::STDIN_FILENO, ++ }; ++ unsafe { libc::isatty(fd) != 0 } ++} ++ ++/// returns true if this is a tty ++#[cfg(windows)] ++pub fn is(stream: Stream) -> bool { ++ use winapi::um::winbase::{STD_ERROR_HANDLE as STD_ERROR, STD_INPUT_HANDLE as STD_INPUT, ++ STD_OUTPUT_HANDLE as STD_OUTPUT}; ++ ++ let (fd, others) = match stream { ++ Stream::Stdin => (STD_INPUT, [STD_ERROR, STD_OUTPUT]), ++ Stream::Stderr => (STD_ERROR, [STD_INPUT, STD_OUTPUT]), ++ Stream::Stdout => (STD_OUTPUT, [STD_INPUT, STD_ERROR]), ++ }; ++ if unsafe { console_on_any(&[fd]) } { ++ // False positives aren't possible. If we got a console then ++ // we definitely have a tty on stdin. ++ return true; ++ } ++ ++ // At this point, we *could* have a false negative. We can determine that ++ // this is true negative if we can detect the presence of a console on ++ // any of the other streams. If another stream has a console, then we know ++ // we're in a Windows console and can therefore trust the negative. ++ if unsafe { console_on_any(&others) } { ++ return false; ++ } ++ ++ // Otherwise, we fall back to a very strange msys hack to see if we can ++ // sneakily detect the presence of a tty. ++ unsafe { msys_tty_on(fd) } ++} ++ ++/// returns true if this is _not_ a tty ++pub fn isnt(stream: Stream) -> bool { ++ !is(stream) ++} ++ ++/// Returns true if any of the given fds are on a console. ++#[cfg(windows)] ++unsafe fn console_on_any(fds: &[DWORD]) -> bool { ++ use winapi::um::consoleapi::GetConsoleMode; ++ use winapi::um::processenv::GetStdHandle; ++ ++ for &fd in fds { ++ let mut out = 0; ++ let handle = GetStdHandle(fd); ++ if GetConsoleMode(handle, &mut out) != 0 { ++ return true; ++ } ++ } ++ false ++} ++ ++/// Returns true if there is an MSYS tty on the given handle. 
++#[cfg(windows)]
++unsafe fn msys_tty_on(fd: DWORD) -> bool {
++    use std::mem;
++    use std::slice;
++
++    use winapi::ctypes::c_void;
++    use winapi::um::winbase::GetFileInformationByHandleEx;
++    use winapi::um::fileapi::FILE_NAME_INFO;
++    use winapi::um::minwinbase::FileNameInfo;
++    use winapi::um::processenv::GetStdHandle;
++    use winapi::shared::minwindef::MAX_PATH;
++
++    let size = mem::size_of::<FILE_NAME_INFO>();
++    let mut name_info_bytes = vec![0u8; size + MAX_PATH * mem::size_of::<WCHAR>()];
++    let res = GetFileInformationByHandleEx(
++        GetStdHandle(fd),
++        FileNameInfo,
++        &mut *name_info_bytes as *mut _ as *mut c_void,
++        name_info_bytes.len() as u32,
++    );
++    if res == 0 {
++        return false;
++    }
++    let name_info: &FILE_NAME_INFO = &*(name_info_bytes.as_ptr() as *const FILE_NAME_INFO);
++    let s = slice::from_raw_parts(
++        name_info.FileName.as_ptr(),
++        name_info.FileNameLength as usize / 2,
++    );
++    let name = String::from_utf16_lossy(s);
++    // This checks whether 'pty' exists in the file name, which indicates that
++    // a pseudo-terminal is attached. To mitigate against false positives
++    // (e.g., an actual file name that contains 'pty'), we also require that
++    // either the strings 'msys-' or 'cygwin-' are in the file name as well.
++    let is_msys = name.contains("msys-") || name.contains("cygwin-");
++    let is_pty = name.contains("-pty");
++    is_msys && is_pty
++}
++
++/// returns true if this is a tty
++#[cfg(target_os = "redox")]
++pub fn is(stream: Stream) -> bool {
++    use std::io;
++    use termion::is_tty;
++
++    match stream {
++        Stream::Stdin => is_tty(&io::stdin()),
++        Stream::Stdout => is_tty(&io::stdout()),
++        Stream::Stderr => is_tty(&io::stderr()),
++    }
++}
++
++/// returns true if this is a tty
++#[cfg(target_arch = "wasm32")]
++pub fn is(_stream: Stream) -> bool {
++    false
++}
++
++#[cfg(test)]
++mod tests {
++    use super::{Stream, is};
++
++    #[test]
++    #[cfg(windows)]
++    fn is_err() {
++        // appveyor pipes its output
++        assert!(!is(Stream::Stderr))
++    }
++
++    #[test]
++    #[cfg(windows)]
++    fn is_out() {
++        // appveyor pipes its output
++        assert!(!is(Stream::Stdout))
++    }
++
++    #[test]
++    #[cfg(windows)]
++    fn is_in() {
++        assert!(is(Stream::Stdin))
++    }
++
++    #[test]
++    #[cfg(unix)]
++    fn is_err() {
++        assert!(is(Stream::Stderr))
++    }
++
++    #[test]
++    #[cfg(unix)]
++    fn is_out() {
++        assert!(is(Stream::Stdout))
++    }
++
++    #[test]
++    #[cfg(target_os = "macos")]
++    fn is_in() {
++        // macos on travis seems to pipe its input
++        assert!(is(Stream::Stdin))
++    }
++
++    #[test]
++    #[cfg(all(not(target_os = "macos"), unix))]
++    fn is_in() {
++        assert!(is(Stream::Stdin))
++    }
++}
diff --cc vendor/cmake-0.1.32/.cargo-checksum.json
index 000000000,000000000..8a0f3dd1b
new file mode 100644
--- /dev/null
+++ b/vendor/cmake-0.1.32/.cargo-checksum.json
@@@ -1,0 -1,0 +1,1 @@@
++{"files":{},"package":"b56821938fa1a3aaf4f0c4f49504928c5a7fcc56cbc9855be8fc2e98567e750c"}
diff --cc vendor/cmake-0.1.32/.travis.yml
index 000000000,000000000..bcd71cbe8
new file mode 100644
--- /dev/null
+++ b/vendor/cmake-0.1.32/.travis.yml
@@@ -1,0 -1,0 +1,20 @@@
++language: rust
++rust:
++  - stable
++  - beta
++  - nightly
++sudo: false
++before_script:
++  - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
++script:
++  - cargo test --verbose
++  - cargo doc --no-deps
++after_success:
++  - travis-cargo --only nightly doc-upload
++env:
++  global:
++    secure:
"IA467qqr1j0BpyTqG6hO8Kpt+EUDEjO1pBVhu4+L76/dygkQIwROgqdT7uXZqBPMjU6Rbi0wzGXXHJjbCWVTCjh7U/Q0bK2svtR8DKtM0o1Un/YftSUFt2p/WoiJ9PrkUjKh1rHuoyijpUqAls0JfIz8OdC45egT2SWDufljo+s=" ++ ++notifications: ++ email: ++ on_success: never diff --cc vendor/cmake-0.1.32/Cargo.toml index 000000000,000000000..2f45135c7 new file mode 100644 --- /dev/null +++ b/vendor/cmake-0.1.32/Cargo.toml @@@ -1,0 -1,0 +1,25 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "cmake" ++version = "0.1.32" ++authors = ["Alex Crichton "] ++description = "A build dependency for running `cmake` to build a native library\n" ++homepage = "https://github.com/alexcrichton/cmake-rs" ++documentation = "https://docs.rs/cmake" ++readme = "README.md" ++keywords = ["build-dependencies"] ++license = "MIT/Apache-2.0" ++repository = "https://github.com/alexcrichton/cmake-rs" ++[dependencies.cc] ++version = "1.0" diff --cc vendor/cmake-0.1.32/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/cmake-0.1.32/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). 
++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. ++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. 
You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. 
In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. diff --cc vendor/cmake-0.1.32/LICENSE-MIT index 000000000,000000000..39e0ed660 new file mode 100644 --- /dev/null +++ b/vendor/cmake-0.1.32/LICENSE-MIT @@@ -1,0 -1,0 +1,25 @@@ ++Copyright (c) 2014 Alex Crichton ++ ++Permission is hereby granted, free of charge, to any ++person obtaining a copy of this software and associated ++documentation files (the "Software"), to deal in the ++Software without restriction, including without ++limitation the rights to use, copy, modify, merge, ++publish, distribute, sublicense, and/or sell copies of ++the Software, and to permit persons to whom the Software ++is furnished to do so, subject to the following ++conditions: ++ ++The above copyright notice and this permission notice ++shall be included in all copies or substantial portions ++of the Software. 
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
diff --cc vendor/cmake-0.1.32/README.md
index 000000000,000000000..42e4e9b3d
new file mode 100644
--- /dev/null
+++ b/vendor/cmake-0.1.32/README.md
@@@ -1,0 -1,0 +1,34 @@@
++# cmake
++
++[![Build Status](https://travis-ci.org/alexcrichton/cmake-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/cmake-rs)
++
++[Documentation](https://docs.rs/cmake)
++
++A build dependency for running the `cmake` build tool to compile a native
++library.
++
++```toml
++# Cargo.toml
++[build-dependencies]
++cmake = "0.1"
++```
++
++The CMake executable is assumed to be `cmake` unless the `CMAKE`
++environmental variable is set.
++
++# License
++
++This project is licensed under either of
++
++ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
++   http://www.apache.org/licenses/LICENSE-2.0)
++ * MIT license ([LICENSE-MIT](LICENSE-MIT) or
++   http://opensource.org/licenses/MIT)
++
++at your option.
++
++### Contribution
++
++Unless you explicitly state otherwise, any contribution intentionally submitted
++for inclusion in this crate by you, as defined in the Apache-2.0 license, shall be
++dual licensed as above, without any additional terms or conditions.
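As a concrete companion to the README, a build script for a hypothetical `libfoo` might combine the knobs this crate exposes as in the sketch below; the project name, define, and target name are placeholders, and `CMAKE=/path/to/cmake cargo build` can point the crate at a different CMake binary, per the note above:

```rust
// build.rs -- an illustrative sketch only; `libfoo` and the values are placeholders.
extern crate cmake;

use cmake::Config;

fn main() {
    let dst = Config::new("libfoo")             // source dir, relative to the manifest
        .define("BUILD_SHARED_LIBS", "OFF")     // forwarded as -DBUILD_SHARED_LIBS=OFF
        .build_target("foo")                    // build this target instead of `install`
        .env("VERBOSE", "1")                    // set for the spawned cmake processes
        .build();                               // returns the output directory

    // With a custom build target there is no install step, so the artifacts
    // stay under the `build` subdirectory of the returned path.
    println!("cargo:rustc-link-search=native={}", dst.join("build").display());
    println!("cargo:rustc-link-lib=static=foo");
}
```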
diff --cc vendor/cmake-0.1.32/src/lib.rs
index 000000000,000000000..4a6b55581
new file mode 100755
--- /dev/null
+++ b/vendor/cmake-0.1.32/src/lib.rs
@@@ -1,0 -1,0 +1,686 @@@
++//! A build dependency for running `cmake` to build a native library
++//!
++//! This crate provides some necessary boilerplate and shim support for running
++//! the system `cmake` command to build a native library. It will add
++//! appropriate cflags for building code to link into Rust, handle cross
++//! compilation, and use the necessary generator for the platform being
++//! targeted.
++//!
++//! The builder-style configuration allows for various variables and such to be
++//! passed down into the build as well.
++//!
++//! ## Installation
++//!
++//! Add this to your `Cargo.toml`:
++//!
++//! ```toml
++//! [build-dependencies]
++//! cmake = "0.1"
++//! ```
++//!
++//! ## Examples
++//!
++//! ```no_run
++//! use cmake;
++//!
++//! // Builds the project in the directory located in `libfoo`, installing it
++//! // into $OUT_DIR
++//! let dst = cmake::build("libfoo");
++//!
++//! println!("cargo:rustc-link-search=native={}", dst.display());
++//! println!("cargo:rustc-link-lib=static=foo");
++//! ```
++//!
++//! ```no_run
++//! use cmake::Config;
++//!
++//! let dst = Config::new("libfoo")
++//!                 .define("FOO", "BAR")
++//!                 .cflag("-foo")
++//!                 .build();
++//! println!("cargo:rustc-link-search=native={}", dst.display());
++//! println!("cargo:rustc-link-lib=static=foo");
++//! ```
++
++#![deny(missing_docs)]
++
++extern crate cc;
++
++use std::env;
++use std::ffi::{OsString, OsStr};
++use std::fs::{self, File};
++use std::io::ErrorKind;
++use std::io::prelude::*;
++use std::path::{Path, PathBuf};
++use std::process::Command;
++
++/// Builder style configuration for a pending CMake build.
++pub struct Config {
++    path: PathBuf,
++    generator: Option<OsString>,
++    cflags: OsString,
++    cxxflags: OsString,
++    defines: Vec<(OsString, OsString)>,
++    deps: Vec<String>,
++    target: Option<String>,
++    host: Option<String>,
++    out_dir: Option<PathBuf>,
++    profile: Option<String>,
++    build_args: Vec<OsString>,
++    cmake_target: Option<String>,
++    env: Vec<(OsString, OsString)>,
++    static_crt: Option<bool>,
++    uses_cxx11: bool,
++    always_configure: bool,
++    no_build_target: bool,
++}
++
++/// Builds the native library rooted at `path` with the default cmake options.
++/// This will return the directory in which the library was installed.
++///
++/// # Examples
++///
++/// ```no_run
++/// use cmake;
++///
++/// // Builds the project in the directory located in `libfoo`, installing it
++/// // into $OUT_DIR
++/// let dst = cmake::build("libfoo");
++///
++/// println!("cargo:rustc-link-search=native={}", dst.display());
++/// println!("cargo:rustc-link-lib=static=foo");
++/// ```
++///
++pub fn build<P: AsRef<Path>>(path: P) -> PathBuf {
++    Config::new(path.as_ref()).build()
++}
++
++impl Config {
++    /// Creates a new blank set of configuration to build the project specified
++    /// at the path `path`.
++    pub fn new<P: AsRef<Path>>(path: P) -> Config {
++        Config {
++            path: env::current_dir().unwrap().join(path),
++            generator: None,
++            cflags: OsString::new(),
++            cxxflags: OsString::new(),
++            defines: Vec::new(),
++            deps: Vec::new(),
++            profile: None,
++            out_dir: None,
++            target: None,
++            host: None,
++            build_args: Vec::new(),
++            cmake_target: None,
++            env: Vec::new(),
++            static_crt: None,
++            uses_cxx11: false,
++            always_configure: true,
++            no_build_target: false,
++        }
++    }
++
++    /// Sets the build-tool generator (`-G`) for this compilation.
++    pub fn generator<T: AsRef<OsStr>>(&mut self, generator: T) -> &mut Config {
++        self.generator = Some(generator.as_ref().to_owned());
++        self
++    }
++
++    /// Adds a custom flag to pass down to the C compiler, supplementing those
++    /// that this library already passes.
++    pub fn cflag<P: AsRef<OsStr>>(&mut self, flag: P) -> &mut Config {
++        self.cflags.push(" ");
++        self.cflags.push(flag.as_ref());
++        self
++    }
++
++    /// Adds a custom flag to pass down to the C++ compiler, supplementing those
++    /// that this library already passes.
++    pub fn cxxflag<P: AsRef<OsStr>>(&mut self, flag: P) -> &mut Config {
++        self.cxxflags.push(" ");
++        self.cxxflags.push(flag.as_ref());
++        self
++    }
++
++    /// Adds a new `-D` flag to pass to cmake during the generation step.
++    pub fn define<K, V>(&mut self, k: K, v: V) -> &mut Config
++        where K: AsRef<OsStr>, V: AsRef<OsStr>
++    {
++        self.defines.push((k.as_ref().to_owned(), v.as_ref().to_owned()));
++        self
++    }
++
++    /// Registers a dependency for this compilation on the native library built
++    /// by Cargo previously.
++    ///
++    /// This registration will modify the `CMAKE_PREFIX_PATH` environment
++    /// variable for the build system generation step.
++    pub fn register_dep(&mut self, dep: &str) -> &mut Config {
++        self.deps.push(dep.to_string());
++        self
++    }
++
++    /// Sets the target triple for this compilation.
++    ///
++    /// This is automatically scraped from `$TARGET` which is set for Cargo
++    /// build scripts so it's not necessary to call this from a build script.
++    pub fn target(&mut self, target: &str) -> &mut Config {
++        self.target = Some(target.to_string());
++        self
++    }
++
++    /// Disables the target option for this compilation.
++    pub fn no_build_target(&mut self, no_build_target: bool) -> &mut Config {
++        self.no_build_target = no_build_target;
++        self
++    }
++
++    /// Sets the host triple for this compilation.
++    ///
++    /// This is automatically scraped from `$HOST` which is set for Cargo
++    /// build scripts so it's not necessary to call this from a build script.
++    pub fn host(&mut self, host: &str) -> &mut Config {
++        self.host = Some(host.to_string());
++        self
++    }
++
++    /// Sets the output directory for this compilation.
++    ///
++    /// This is automatically scraped from `$OUT_DIR` which is set for Cargo
++    /// build scripts so it's not necessary to call this from a build script.
++    pub fn out_dir<P: AsRef<Path>>(&mut self, out: P) -> &mut Config {
++        self.out_dir = Some(out.as_ref().to_path_buf());
++        self
++    }
++
++    /// Sets the profile for this compilation.
++    ///
++    /// This is automatically scraped from `$PROFILE` which is set for Cargo
++    /// build scripts so it's not necessary to call this from a build script.
++    pub fn profile(&mut self, profile: &str) -> &mut Config {
++        self.profile = Some(profile.to_string());
++        self
++    }
++
++    /// Configures whether the /MT flag or the /MD flag will be passed to msvc build tools.
++    ///
++    /// This option defaults to `false`, and affects only msvc targets.
++    pub fn static_crt(&mut self, static_crt: bool) -> &mut Config {
++        self.static_crt = Some(static_crt);
++        self
++    }
++
++    /// Add an argument to the final `cmake` build step
++    pub fn build_arg<A: AsRef<OsStr>>(&mut self, arg: A) -> &mut Config {
++        self.build_args.push(arg.as_ref().to_owned());
++        self
++    }
++
++    /// Configure an environment variable for the `cmake` processes spawned by
++    /// this crate in the `build` step.
++    pub fn env<K, V>(&mut self, key: K, value: V) -> &mut Config
++        where K: AsRef<OsStr>,
++              V: AsRef<OsStr>,
++    {
++        self.env.push((key.as_ref().to_owned(), value.as_ref().to_owned()));
++        self
++    }
++
++    /// Sets the build target for the final `cmake` build step, this will
++    /// default to "install" if not specified.
++    pub fn build_target(&mut self, target: &str) -> &mut Config {
++        self.cmake_target = Some(target.to_string());
++        self
++    }
++
++    /// Alters the default target triple on OSX to ensure that c++11 is
++    /// available. Does not change the target triple if it is explicitly
++    /// specified.
++    ///
++    /// This does not otherwise affect any CXX flags, i.e. it does not set
++    /// -std=c++11 or -stdlib=libc++.
++    pub fn uses_cxx11(&mut self) -> &mut Config {
++        self.uses_cxx11 = true;
++        self
++    }
++
++    /// Forces CMake to always run before building the custom target.
++    ///
++    /// In some cases, when you have a big project, you can disable
++    /// subsequent runs of cmake to make `cargo build` faster.
++    pub fn always_configure(&mut self, always_configure: bool) -> &mut Config {
++        self.always_configure = always_configure;
++        self
++    }
++
++    /// Run this configuration, compiling the library with all the configured
++    /// options.
++    ///
++    /// This will run both the build system generator command as well as the
++    /// command to build the library.
++ pub fn build(&mut self) -> PathBuf { ++ let target = match self.target.clone() { ++ Some(t) => t, ++ None => { ++ let mut t = getenv_unwrap("TARGET"); ++ if t.ends_with("-darwin") && self.uses_cxx11 { ++ t = t + "11" ++ } ++ t ++ } ++ }; ++ let host = self.host.clone().unwrap_or_else(|| { ++ getenv_unwrap("HOST") ++ }); ++ let msvc = target.contains("msvc"); ++ let mut c_cfg = cc::Build::new(); ++ c_cfg.cargo_metadata(false) ++ .opt_level(0) ++ .debug(false) ++ .target(&target) ++ .warnings(false) ++ .host(&host); ++ let mut cxx_cfg = cc::Build::new(); ++ cxx_cfg.cargo_metadata(false) ++ .cpp(true) ++ .opt_level(0) ++ .debug(false) ++ .target(&target) ++ .warnings(false) ++ .host(&host); ++ if let Some(static_crt) = self.static_crt { ++ c_cfg.static_crt(static_crt); ++ cxx_cfg.static_crt(static_crt); ++ } ++ let c_compiler = c_cfg.get_compiler(); ++ let cxx_compiler = cxx_cfg.get_compiler(); ++ ++ let dst = self.out_dir.clone().unwrap_or_else(|| { ++ PathBuf::from(getenv_unwrap("OUT_DIR")) ++ }); ++ let build = dst.join("build"); ++ self.maybe_clear(&build); ++ let _ = fs::create_dir(&build); ++ ++ // Add all our dependencies to our cmake paths ++ let mut cmake_prefix_path = Vec::new(); ++ for dep in &self.deps { ++ let dep = dep.to_uppercase().replace('-', "_"); ++ if let Some(root) = env::var_os(&format!("DEP_{}_ROOT", dep)) { ++ cmake_prefix_path.push(PathBuf::from(root)); ++ } ++ } ++ let system_prefix = env::var_os("CMAKE_PREFIX_PATH") ++ .unwrap_or(OsString::new()); ++ cmake_prefix_path.extend(env::split_paths(&system_prefix) ++ .map(|s| s.to_owned())); ++ let cmake_prefix_path = env::join_paths(&cmake_prefix_path).unwrap(); ++ ++ // Build up the first cmake command to build the build system. ++ let executable = env::var("CMAKE").unwrap_or("cmake".to_owned()); ++ let mut cmd = Command::new(executable); ++ cmd.arg(&self.path) ++ .current_dir(&build); ++ if target.contains("windows-gnu") { ++ if host.contains("windows") { ++ // On MinGW we need to coerce cmake to not generate a visual ++ // studio build system but instead use makefiles that MinGW can ++ // use to build. ++ if self.generator.is_none() { ++ // If make.exe isn't found, that means we may be using a MinGW ++ // toolchain instead of a MSYS2 toolchain. If neither is found, ++ // the build cannot continue. ++ let has_msys2 = Command::new("make").arg("--version").output().err() ++ .map(|e| e.kind() != ErrorKind::NotFound).unwrap_or(true); ++ let has_mingw32 = Command::new("mingw32-make").arg("--version").output().err() ++ .map(|e| e.kind() != ErrorKind::NotFound).unwrap_or(true); ++ ++ let generator = match (has_msys2, has_mingw32) { ++ (true, _) => "MSYS Makefiles", ++ (false, true) => "MinGW Makefiles", ++ (false, false) => fail("no valid generator found for GNU toolchain; MSYS or MinGW must be installed") ++ }; ++ ++ cmd.arg("-G").arg(generator); ++ } ++ } else { ++ // If we're cross compiling onto windows, then set some ++ // variables which will hopefully get things to succeed. Some ++ // systems may need the `windres` or `dlltool` variables set, so ++ // set them if possible. 
++                if !self.defined("CMAKE_SYSTEM_NAME") {
++                    cmd.arg("-DCMAKE_SYSTEM_NAME=Windows");
++                }
++                if !self.defined("CMAKE_RC_COMPILER") {
++                    let exe = find_exe(c_compiler.path());
++                    if let Some(name) = exe.file_name().unwrap().to_str() {
++                        let name = name.replace("gcc", "windres");
++                        let windres = exe.with_file_name(name);
++                        if windres.is_file() {
++                            let mut arg = OsString::from("-DCMAKE_RC_COMPILER=");
++                            arg.push(&windres);
++                            cmd.arg(arg);
++                        }
++                    }
++                }
++            }
++        } else if msvc {
++            // If we're on MSVC we need to be sure to use the right generator or
++            // otherwise we won't get 32/64 bit correct automatically.
++            // This also guarantees that NMake generator isn't chosen implicitly.
++            if self.generator.is_none() {
++                cmd.arg("-G").arg(self.visual_studio_generator(&target));
++            }
++        } else if target.contains("redox") {
++            if !self.defined("CMAKE_SYSTEM_NAME") {
++                cmd.arg("-DCMAKE_SYSTEM_NAME=Generic");
++            }
++        } else if target.contains("solaris") {
++            if !self.defined("CMAKE_SYSTEM_NAME") {
++                cmd.arg("-DCMAKE_SYSTEM_NAME=SunOS");
++            }
++        }
++        let mut is_ninja = false;
++        if let Some(ref generator) = self.generator {
++            cmd.arg("-G").arg(generator);
++            is_ninja = generator.to_string_lossy().contains("Ninja");
++        }
++        let profile = self.profile.clone().unwrap_or_else(|| {
++            match &getenv_unwrap("PROFILE")[..] {
++                "bench" | "release" => "Release",
++                _ => "Debug",
++            }.to_string()
++        });
++        for &(ref k, ref v) in &self.defines {
++            let mut os = OsString::from("-D");
++            os.push(k);
++            os.push("=");
++            os.push(v);
++            cmd.arg(os);
++        }
++
++        if !self.defined("CMAKE_INSTALL_PREFIX") {
++            let mut dstflag = OsString::from("-DCMAKE_INSTALL_PREFIX=");
++            dstflag.push(&dst);
++            cmd.arg(dstflag);
++        }
++
++        let build_type = self.defines.iter().find(|&&(ref a, _)| {
++            a == "CMAKE_BUILD_TYPE"
++        }).map(|x| x.1.to_str().unwrap()).unwrap_or(&profile);
++        let build_type_upcase = build_type.chars()
++                                          .flat_map(|c| c.to_uppercase())
++                                          .collect::<String>();
++
++        {
++            // let cmake deal with optimization/debuginfo
++            let skip_arg = |arg: &OsStr| {
++                match arg.to_str() {
++                    Some(s) => {
++                        s.starts_with("-O") || s.starts_with("/O") || s == "-g"
++                    }
++                    None => false,
++                }
++            };
++            let mut set_compiler = |kind: &str,
++                                    compiler: &cc::Tool,
++                                    extra: &OsString| {
++                let flag_var = format!("CMAKE_{}_FLAGS", kind);
++                let tool_var = format!("CMAKE_{}_COMPILER", kind);
++                if !self.defined(&flag_var) {
++                    let mut flagsflag = OsString::from("-D");
++                    flagsflag.push(&flag_var);
++                    flagsflag.push("=");
++                    flagsflag.push(extra);
++                    for arg in compiler.args() {
++                        if skip_arg(arg) {
++                            continue
++                        }
++                        flagsflag.push(" ");
++                        flagsflag.push(arg);
++                    }
++                    cmd.arg(flagsflag);
++                }
++
++                // The visual studio generator apparently doesn't respect
++                // `CMAKE_C_FLAGS` but does respect `CMAKE_C_FLAGS_RELEASE` and
++                // such. We need to communicate /MD vs /MT, so set those vars
++                // here.
++                //
++                // Note that for other generators, though, this *overrides*
++                // things like the optimization flags, which is bad.
++                if self.generator.is_none() && msvc {
++                    let flag_var_alt = format!("CMAKE_{}_FLAGS_{}", kind,
++                                               build_type_upcase);
++                    if !self.defined(&flag_var_alt) {
++                        let mut flagsflag = OsString::from("-D");
++                        flagsflag.push(&flag_var_alt);
++                        flagsflag.push("=");
++                        flagsflag.push(extra);
++                        for arg in compiler.args() {
++                            if skip_arg(arg) {
++                                continue
++                            }
++                            flagsflag.push(" ");
++                            flagsflag.push(arg);
++                        }
++                        cmd.arg(flagsflag);
++                    }
++                }
++
++                // Apparently cmake likes to have an absolute path to the
++                // compiler as otherwise it sometimes thinks that this variable
++                // changed as it thinks the found compiler, /usr/bin/cc,
++                // differs from the specified compiler, cc. Not entirely sure
++                // what's up, but at least this means cmake doesn't get
++                // confused?
++                //
++                // Also specify this on Windows only if we use MSVC with Ninja,
++                // as it's not needed for MSVC with Visual Studio generators and
++                // for MinGW it doesn't really vary.
++                if !self.defined("CMAKE_TOOLCHAIN_FILE")
++                   && !self.defined(&tool_var)
++                   && (env::consts::FAMILY != "windows" || (msvc && is_ninja)) {
++                    let mut ccompiler = OsString::from("-D");
++                    ccompiler.push(&tool_var);
++                    ccompiler.push("=");
++                    ccompiler.push(find_exe(compiler.path()));
++                    #[cfg(windows)] {
++                        // CMake doesn't like unescaped `\`s in compiler paths
++                        // so we either have to escape them or replace with `/`s.
++                        use std::os::windows::ffi::{OsStrExt, OsStringExt};
++                        let wchars = ccompiler.encode_wide().map(|wchar| {
++                            if wchar == b'\\' as u16 { '/' as u16 } else { wchar }
++                        }).collect::<Vec<u16>>();
++                        ccompiler = OsString::from_wide(&wchars);
++                    }
++                    cmd.arg(ccompiler);
++                }
++            };
++
++            set_compiler("C", &c_compiler, &self.cflags);
++            set_compiler("CXX", &cxx_compiler, &self.cxxflags);
++        }
++
++        if !self.defined("CMAKE_BUILD_TYPE") {
++            cmd.arg(&format!("-DCMAKE_BUILD_TYPE={}", profile));
++        }
++
++        if !self.defined("CMAKE_TOOLCHAIN_FILE") {
++            if let Ok(s) = env::var("CMAKE_TOOLCHAIN_FILE") {
++                cmd.arg(&format!("-DCMAKE_TOOLCHAIN_FILE={}", s));
++            }
++        }
++
++        for &(ref k, ref v) in c_compiler.env().iter().chain(&self.env) {
++            cmd.env(k, v);
++        }
++
++        if self.always_configure || !build.join("CMakeCache.txt").exists() {
++            run(cmd.env("CMAKE_PREFIX_PATH", cmake_prefix_path), "cmake");
++        } else {
++            println!("CMake project was already configured. Skipping configuration step.");
++        }
++
++        let mut makeflags = None;
++        let mut parallel_args = Vec::new();
++        if let Ok(s) = env::var("NUM_JOBS") {
++            match self.generator.as_ref().map(|g| g.to_string_lossy()) {
++                Some(ref g) if g.contains("Ninja") => {
++                    parallel_args.push(format!("-j{}", s));
++                }
++                Some(ref g) if g.contains("Visual Studio") => {
++                    parallel_args.push(format!("/m:{}", s));
++                }
++                Some(ref g) if g.contains("NMake") => {
++                    // NMake creates `Makefile`s, but doesn't understand `-jN`.
++                }
++                _ if fs::metadata(&dst.join("build/Makefile")).is_ok() => {
++                    match env::var_os("CARGO_MAKEFLAGS") {
++                        // Only do this on non-windows and non-bsd
++                        // On Windows, we could be invoking make instead of
++                        // mingw32-make which doesn't work with our jobserver
++                        // bsdmake also does not work with our job server
++                        Some(ref s) if !(cfg!(windows) ||
++                                         cfg!(target_os = "openbsd") ||
++                                         cfg!(target_os = "netbsd") ||
++                                         cfg!(target_os = "freebsd") ||
++                                         cfg!(target_os = "bitrig") ||
++                                         cfg!(target_os = "dragonflybsd")
++                                        ) => makeflags = Some(s.clone()),
++
++                        // This looks like `make`, let's hope it understands `-jN`.
++ _ => parallel_args.push(format!("-j{}", s)), ++ } ++ } ++ _ => {} ++ } ++ } ++ ++ // And build! ++ let target = self.cmake_target.clone().unwrap_or("install".to_string()); ++ let mut cmd = Command::new("cmake"); ++ for &(ref k, ref v) in c_compiler.env().iter().chain(&self.env) { ++ cmd.env(k, v); ++ } ++ if let Some(flags) = makeflags { ++ cmd.env("MAKEFLAGS", flags); ++ } ++ ++ cmd.arg("--build").arg("."); ++ ++ if !self.no_build_target { ++ cmd.arg("--target").arg(target); ++ } ++ ++ run(cmd.arg("--config").arg(&profile) ++ .arg("--").args(&self.build_args) ++ .args(¶llel_args) ++ .current_dir(&build), "cmake"); ++ ++ println!("cargo:root={}", dst.display()); ++ return dst ++ } ++ ++ fn visual_studio_generator(&self, target: &str) -> String { ++ use cc::windows_registry::{find_vs_version, VsVers}; ++ ++ let base = match find_vs_version() { ++ Ok(VsVers::Vs15) => "Visual Studio 15 2017", ++ Ok(VsVers::Vs14) => "Visual Studio 14 2015", ++ Ok(VsVers::Vs12) => "Visual Studio 12 2013", ++ Ok(_) => panic!("Visual studio version detected but this crate \ ++ doesn't know how to generate cmake files for it, \ ++ can the `cmake` crate be updated?"), ++ Err(msg) => panic!(msg), ++ }; ++ if target.contains("i686") { ++ base.to_string() ++ } else if target.contains("x86_64") { ++ format!("{} Win64", base) ++ } else { ++ panic!("unsupported msvc target: {}", target); ++ } ++ } ++ ++ fn defined(&self, var: &str) -> bool { ++ self.defines.iter().any(|&(ref a, _)| a == var) ++ } ++ ++ // If a cmake project has previously been built (e.g. CMakeCache.txt already ++ // exists), then cmake will choke if the source directory for the original ++ // project being built has changed. Detect this situation through the ++ // `CMAKE_HOME_DIRECTORY` variable that cmake emits and if it doesn't match ++ // we blow away the build directory and start from scratch (the recommended ++ // solution apparently [1]). ++ // ++ // [1]: https://cmake.org/pipermail/cmake/2012-August/051545.html ++ fn maybe_clear(&self, dir: &Path) { ++ // CMake will apparently store canonicalized paths which normally ++ // isn't relevant to us but we canonicalize it here to ensure ++ // we're both checking the same thing. ++ let path = fs::canonicalize(&self.path).unwrap_or(self.path.clone()); ++ let mut f = match File::open(dir.join("CMakeCache.txt")) { ++ Ok(f) => f, ++ Err(..) => return, ++ }; ++ let mut u8contents = Vec::new(); ++ match f.read_to_end(&mut u8contents) { ++ Ok(f) => f, ++ Err(..) 
=> return, ++ }; ++ let contents = String::from_utf8_lossy(&u8contents); ++ drop(f); ++ for line in contents.lines() { ++ if line.starts_with("CMAKE_HOME_DIRECTORY") { ++ let needs_cleanup = match line.split('=').next_back() { ++ Some(cmake_home) => { ++ fs::canonicalize(cmake_home) ++ .ok() ++ .map(|cmake_home| cmake_home != path) ++ .unwrap_or(true) ++ }, ++ None => true ++ }; ++ if needs_cleanup { ++ println!("detected home dir change, cleaning out entire build \ ++ directory"); ++ fs::remove_dir_all(dir).unwrap(); ++ } ++ break ++ } ++ } ++ } ++} ++ ++fn run(cmd: &mut Command, program: &str) { ++ println!("running: {:?}", cmd); ++ let status = match cmd.status() { ++ Ok(status) => status, ++ Err(ref e) if e.kind() == ErrorKind::NotFound => { ++ fail(&format!("failed to execute command: {}\nis `{}` not installed?", ++ e, program)); ++ } ++ Err(e) => fail(&format!("failed to execute command: {}", e)), ++ }; ++ if !status.success() { ++ fail(&format!("command did not execute successfully, got: {}", status)); ++ } ++} ++ ++fn find_exe(path: &Path) -> PathBuf { ++ env::split_paths(&env::var_os("PATH").unwrap_or(OsString::new())) ++ .map(|p| p.join(path)) ++ .find(|p| fs::metadata(p).is_ok()) ++ .unwrap_or(path.to_owned()) ++} ++ ++fn getenv_unwrap(v: &str) -> String { ++ match env::var(v) { ++ Ok(s) => s, ++ Err(..) => fail(&format!("environment variable `{}` not defined", v)), ++ } ++} ++ ++fn fail(s: &str) -> ! { ++ panic!("\n{}\n\nbuild script failed, must exit now", s) ++} diff --cc vendor/core-foundation-0.6.1/.cargo-checksum.json index 000000000,000000000..6ccd21442 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"cc3532ec724375c7cb7ff0a097b714fde180bb1f6ed2ab27cfcd99ffca873cd2"} diff --cc vendor/core-foundation-0.6.1/Cargo.toml index 000000000,000000000..86a67b527 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/Cargo.toml @@@ -1,0 -1,0 +1,39 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. 
If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "core-foundation" ++version = "0.6.1" ++authors = ["The Servo Project Developers"] ++description = "Bindings to Core Foundation for OS X" ++homepage = "https://github.com/servo/core-foundation-rs" ++license = "MIT / Apache-2.0" ++repository = "https://github.com/servo/core-foundation-rs" ++[dependencies.chrono] ++version = "0.4" ++optional = true ++ ++[dependencies.core-foundation-sys] ++version = "0.6.1" ++ ++[dependencies.libc] ++version = "0.2" ++ ++[dependencies.uuid] ++version = "0.5" ++optional = true ++ ++[features] ++mac_os_10_7_support = ["core-foundation-sys/mac_os_10_7_support"] ++mac_os_10_8_features = ["core-foundation-sys/mac_os_10_8_features"] ++with-chrono = ["chrono"] ++with-uuid = ["uuid"] diff --cc vendor/core-foundation-0.6.1/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. 
++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. 
You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. 
In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. diff --cc vendor/core-foundation-0.6.1/LICENSE-MIT index 000000000,000000000..807526f57 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/LICENSE-MIT @@@ -1,0 -1,0 +1,25 @@@ ++Copyright (c) 2012-2013 Mozilla Foundation ++ ++Permission is hereby granted, free of charge, to any ++person obtaining a copy of this software and associated ++documentation files (the "Software"), to deal in the ++Software without restriction, including without ++limitation the rights to use, copy, modify, merge, ++publish, distribute, sublicense, and/or sell copies of ++the Software, and to permit persons to whom the Software ++is furnished to do so, subject to the following ++conditions: ++ ++The above copyright notice and this permission notice ++shall be included in all copies or substantial portions ++of the Software. 
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
diff --cc vendor/core-foundation-0.6.1/src/array.rs
index 000000000,000000000..516b1dab7
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-0.6.1/src/array.rs
@@@ -1,0 -1,0 +1,269 @@@
++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++//! Heterogeneous immutable arrays.
++
++pub use core_foundation_sys::array::*;
++pub use core_foundation_sys::base::CFIndex;
++use core_foundation_sys::base::{CFTypeRef, CFRelease, kCFAllocatorDefault};
++use std::mem;
++use std::marker::PhantomData;
++use std::os::raw::c_void;
++use ConcreteCFType;
++
++use base::{CFIndexConvertible, TCFType, CFRange};
++use base::{FromVoid, ItemRef};
++
++/// A heterogeneous immutable array.
++pub struct CFArray<T = *const c_void>(CFArrayRef, PhantomData<T>);
++
++impl<T> Drop for CFArray<T> {
++    fn drop(&mut self) {
++        unsafe { CFRelease(self.as_CFTypeRef()) }
++    }
++}
++
++pub struct CFArrayIterator<'a, T: 'a> {
++    array: &'a CFArray<T>,
++    index: CFIndex,
++    len: CFIndex,
++}
++
++impl<'a, T: FromVoid> Iterator for CFArrayIterator<'a, T> {
++    type Item = ItemRef<'a, T>;
++
++    fn next(&mut self) -> Option<ItemRef<'a, T>> {
++        if self.index >= self.len {
++            None
++        } else {
++            let value = unsafe { self.array.get_unchecked(self.index) };
++            self.index += 1;
++            Some(value)
++        }
++    }
++}
++
++impl<'a, T: FromVoid> ExactSizeIterator for CFArrayIterator<'a, T> {
++    fn len(&self) -> usize {
++        (self.array.len() - self.index) as usize
++    }
++}
++
++impl_TCFType!(CFArray<T>, CFArrayRef, CFArrayGetTypeID);
++impl_CFTypeDescription!(CFArray<T>);
++
++unsafe impl ConcreteCFType for CFArray<*const c_void> {}
++
++impl<T> CFArray<T> {
++    /// Creates a new `CFArray` with the given elements, which must be `CFType` objects.
++    pub fn from_CFTypes(elems: &[T]) -> CFArray<T> where T: TCFType {
++        unsafe {
++            let elems: Vec<CFTypeRef> = elems.iter().map(|elem| elem.as_CFTypeRef()).collect();
++            let array_ref = CFArrayCreate(kCFAllocatorDefault,
++                                          mem::transmute(elems.as_ptr()),
++                                          elems.len().to_CFIndex(),
++                                          &kCFTypeArrayCallBacks);
++            TCFType::wrap_under_create_rule(array_ref)
++        }
++    }
++
++    #[inline]
++    pub fn to_untyped(&self) -> CFArray {
++        unsafe { CFArray::wrap_under_get_rule(self.0) }
++    }
++
++    /// Returns the same array, but with the type reset to void pointers.
++    /// Equal to `to_untyped`, but is faster since it does not increment the retain count.
++    #[inline]
++    pub fn into_untyped(self) -> CFArray {
++        let reference = self.0;
++        mem::forget(self);
++        unsafe { CFArray::wrap_under_create_rule(reference) }
++    }
++
++    /// Iterates over the elements of this `CFArray`.
++    ///
++    /// Careful; the loop body must wrap the reference properly. Generally, when array elements are
++    /// Core Foundation objects (not always true), they need to be wrapped with
++    /// `TCFType::wrap_under_get_rule()`.
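++    ///
++    /// A sketch of such a loop (an illustrative addition to this vendored
++    /// copy, not upstream text), mirroring the `iter_untyped_array` test
++    /// below; `cf_string` is assumed to be a `CFString` as in that test:
++    ///
++    /// ```ignore
++    /// let array: CFArray = CFArray::from_CFTypes(&[cf_string]).into_untyped();
++    /// for ptr in array.iter() {
++    ///     // Each item is a raw *const c_void; wrap it before use.
++    ///     let s = unsafe { CFString::wrap_under_get_rule(CFStringRef::from_void_ptr(*ptr)) };
++    ///     assert_eq!(s.to_string(), "bar");
++    /// }
++    /// ```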
++    #[inline]
++    pub fn iter<'a>(&'a self) -> CFArrayIterator<'a, T> {
++        CFArrayIterator {
++            array: self,
++            index: 0,
++            len: self.len(),
++        }
++    }
++
++    #[inline]
++    pub fn len(&self) -> CFIndex {
++        unsafe {
++            CFArrayGetCount(self.0)
++        }
++    }
++
++    #[inline]
++    pub unsafe fn get_unchecked<'a>(&'a self, index: CFIndex) -> ItemRef<'a, T> where T: FromVoid {
++        T::from_void(CFArrayGetValueAtIndex(self.0, index))
++    }
++
++    #[inline]
++    pub fn get<'a>(&'a self, index: CFIndex) -> Option<ItemRef<'a, T>> where T: FromVoid {
++        if index < self.len() {
++            Some(unsafe { T::from_void(CFArrayGetValueAtIndex(self.0, index)) } )
++        } else {
++            None
++        }
++    }
++
++    pub fn get_values(&self, range: CFRange) -> Vec<*const c_void> {
++        let mut vec = Vec::with_capacity(range.length as usize);
++        unsafe {
++            CFArrayGetValues(self.0, range, vec.as_mut_ptr());
++            vec.set_len(range.length as usize);
++            vec
++        }
++    }
++
++    pub fn get_all_values(&self) -> Vec<*const c_void> {
++        self.get_values(CFRange {
++            location: 0,
++            length: self.len()
++        })
++    }
++}
++
++impl<'a, T: FromVoid> IntoIterator for &'a CFArray<T> {
++    type Item = ItemRef<'a, T>;
++    type IntoIter = CFArrayIterator<'a, T>;
++
++    fn into_iter(self) -> CFArrayIterator<'a, T> {
++        self.iter()
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use super::*;
++    use std::mem;
++    use base::CFType;
++
++    #[test]
++    fn to_untyped_correct_retain_count() {
++        let array = CFArray::<CFType>::from_CFTypes(&[]);
++        assert_eq!(array.retain_count(), 1);
++
++        let untyped_array = array.to_untyped();
++        assert_eq!(array.retain_count(), 2);
++        assert_eq!(untyped_array.retain_count(), 2);
++
++        mem::drop(array);
++        assert_eq!(untyped_array.retain_count(), 1);
++    }
++
++    #[test]
++    fn into_untyped() {
++        let array = CFArray::<CFType>::from_CFTypes(&[]);
++        let array2 = array.to_untyped();
++        assert_eq!(array.retain_count(), 2);
++
++        let untyped_array = array.into_untyped();
++        assert_eq!(untyped_array.retain_count(), 2);
++
++        mem::drop(array2);
++        assert_eq!(untyped_array.retain_count(), 1);
++    }
++
++    #[test]
++    fn borrow() {
++        use string::CFString;
++
++        let string = CFString::from_static_string("bar");
++        assert_eq!(string.retain_count(), 1);
++        let x;
++        {
++            let arr: CFArray<CFString> = CFArray::from_CFTypes(&[string]);
++            {
++                let p = arr.get(0).unwrap();
++                assert_eq!(p.retain_count(), 1);
++            }
++            {
++                x = arr.get(0).unwrap().clone();
++                assert_eq!(x.retain_count(), 2);
++                assert_eq!(x.to_string(), "bar");
++            }
++        }
++        assert_eq!(x.retain_count(), 1);
++    }
++
++    #[test]
++    fn iter_untyped_array() {
++        use string::{CFString, CFStringRef};
++        use base::TCFTypeRef;
++
++        let cf_string = CFString::from_static_string("bar");
++        let array: CFArray = CFArray::from_CFTypes(&[cf_string.clone()]).into_untyped();
++
++        let cf_strings = array.iter().map(|ptr| {
++            unsafe { CFString::wrap_under_get_rule(CFStringRef::from_void_ptr(*ptr)) }
++        }).collect::<Vec<_>>();
++        let strings = cf_strings.iter().map(|s| s.to_string()).collect::<Vec<_>>();
++        assert_eq!(cf_string.retain_count(), 3);
++        assert_eq!(&strings[..], &["bar"]);
++    }
++
++    #[test]
++    fn should_box_and_unbox() {
++        use number::CFNumber;
++
++        let n0 = CFNumber::from(0);
++        let n1 = CFNumber::from(1);
++        let n2 = CFNumber::from(2);
++        let n3 = CFNumber::from(3);
++        let n4 = CFNumber::from(4);
++        let n5 = CFNumber::from(5);
++
++        let arr = CFArray::from_CFTypes(&[
++            n0.as_CFType(),
++            n1.as_CFType(),
++            n2.as_CFType(),
++            n3.as_CFType(),
++            n4.as_CFType(),
++            n5.as_CFType(),
++        ]);
++
++        assert!(arr.get_all_values() == &[n0.as_CFTypeRef(),
++                                          n1.as_CFTypeRef(),
++                                          n2.as_CFTypeRef(),
++                                          n3.as_CFTypeRef(),
++                                          n4.as_CFTypeRef(),
++                                          n5.as_CFTypeRef()]);
++
++        let mut sum = 0;
++
++        let mut iter = arr.iter();
++        assert_eq!(iter.len(), 6);
++        assert!(iter.next().is_some());
++        assert_eq!(iter.len(), 5);
++
++        for elem in iter {
++            let number: CFNumber = elem.downcast::<CFNumber>().unwrap();
++            sum += number.to_i64().unwrap()
++        }
++
++        assert!(sum == 15);
++
++        for elem in arr.iter() {
++            let number: CFNumber = elem.downcast::<CFNumber>().unwrap();
++            sum += number.to_i64().unwrap()
++        }
++
++        assert!(sum == 30);
++    }
++}
diff --cc vendor/core-foundation-0.6.1/src/attributed_string.rs
index 000000000,000000000..dea20fcf6
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-0.6.1/src/attributed_string.rs
@@@ -1,0 -1,0 +1,79 @@@
++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++pub use core_foundation_sys::attributed_string::*;
++
++use base::TCFType;
++use core_foundation_sys::base::{CFIndex, CFRange, kCFAllocatorDefault};
++use std::ptr::null;
++use string::{CFString, CFStringRef};
++
++declare_TCFType!{
++    CFAttributedString, CFAttributedStringRef
++}
++impl_TCFType!(CFAttributedString, CFAttributedStringRef, CFAttributedStringGetTypeID);
++
++impl CFAttributedString {
++    #[inline]
++    pub fn new(string: &CFString) -> Self {
++        unsafe {
++            let astr_ref = CFAttributedStringCreate(
++                kCFAllocatorDefault, string.as_concrete_TypeRef(), null());
++
++            CFAttributedString::wrap_under_create_rule(astr_ref)
++        }
++    }
++
++    #[inline]
++    pub fn char_len(&self) -> CFIndex {
++        unsafe {
++            CFAttributedStringGetLength(self.0)
++        }
++    }
++}
++
++declare_TCFType!{
++    CFMutableAttributedString, CFMutableAttributedStringRef
++}
++impl_TCFType!(CFMutableAttributedString, CFMutableAttributedStringRef, CFMutableAttributedStringGetTypeID);
++
++impl CFMutableAttributedString {
++    #[inline]
++    pub fn new() -> Self {
++        unsafe {
++            let astr_ref = CFAttributedStringCreateMutable(
++                kCFAllocatorDefault, 0);
++
++            CFMutableAttributedString::wrap_under_create_rule(astr_ref)
++        }
++    }
++
++    #[inline]
++    pub fn char_len(&self) -> CFIndex {
++        unsafe {
++            CFAttributedStringGetLength(self.0)
++        }
++    }
++
++    #[inline]
++    pub fn replace_str(&mut self, string: &CFString, range: CFRange) {
++        unsafe {
++            CFAttributedStringReplaceString(
++                self.0, range, string.as_concrete_TypeRef());
++        }
++    }
++
++    #[inline]
++    pub fn set_attribute<T: TCFType>(&mut self, range: CFRange, name: CFStringRef, value: T) {
++        unsafe {
++            CFAttributedStringSetAttribute(
++                self.0, range, name, value.as_CFTypeRef());
++        }
++    }
++}
diff --cc vendor/core-foundation-0.6.1/src/base.rs
index 000000000,000000000..6f4b64ad9
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-0.6.1/src/base.rs
@@@ -1,0 -1,0 +1,391 @@@
++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++use std;
++use std::fmt;
++use std::marker::PhantomData;
++use std::mem;
++use std::mem::ManuallyDrop;
++use std::ops::Deref;
++use std::os::raw::c_void;
++
++pub use core_foundation_sys::base::*;
++
++use string::CFString;
++use ConcreteCFType;
++
++pub trait CFIndexConvertible {
++    /// Always use this method to construct a `CFIndex` value. It performs bounds checking to
++    /// ensure the value is in range.
++    fn to_CFIndex(self) -> CFIndex;
++}
++
++impl CFIndexConvertible for usize {
++    #[inline]
++    fn to_CFIndex(self) -> CFIndex {
++        let max_CFIndex = CFIndex::max_value();
++        if self > (max_CFIndex as usize) {
++            panic!("value out of range")
++        }
++        self as CFIndex
++    }
++}
++
++declare_TCFType!{
++    /// Superclass of all Core Foundation objects.
++    CFType, CFTypeRef
++}
++
++impl CFType {
++    /// Try to downcast the `CFType` to a subclass. Checking if the instance is the
++    /// correct subclass happens at runtime and `None` is returned if it is not the correct type.
++    /// Works similar to [`Box::downcast`] and [`CFPropertyList::downcast`].
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// # use core_foundation::string::CFString;
++    /// # use core_foundation::boolean::CFBoolean;
++    /// # use core_foundation::base::{CFType, TCFType};
++    /// #
++    /// // Create a string.
++    /// let string: CFString = CFString::from_static_string("FooBar");
++    /// // Cast it up to a CFType.
++    /// let cf_type: CFType = string.as_CFType();
++    /// // Cast it down again.
++    /// assert!(cf_type.downcast::<CFString>().unwrap().to_string() == "FooBar");
++    /// // Casting it to some other type will yield `None`
++    /// assert!(cf_type.downcast::<CFBoolean>().is_none());
++    /// ```
++    ///
++    /// ```compile_fail
++    /// # use core_foundation::array::CFArray;
++    /// # use core_foundation::base::TCFType;
++    /// # use core_foundation::boolean::CFBoolean;
++    /// # use core_foundation::string::CFString;
++    /// #
++    /// let boolean_array = CFArray::from_CFTypes(&[CFBoolean::true_value()]).into_CFType();
++    ///
++    /// // This downcast is not allowed and causes compiler error, since it would cause undefined
++    /// // behavior to access the elements of the array as a CFString:
++    /// let invalid_string_array = boolean_array
++    ///     .downcast_into::<CFArray<CFString>>()
++    ///     .unwrap();
++    /// ```
++    ///
++    /// [`Box::downcast`]: https://doc.rust-lang.org/std/boxed/struct.Box.html#method.downcast
++    /// [`CFPropertyList::downcast`]: ../propertylist/struct.CFPropertyList.html#method.downcast
++    #[inline]
++    pub fn downcast<T: ConcreteCFType>(&self) -> Option<T> {
++        if self.instance_of::<T>() {
++            unsafe {
++                let reference = T::Ref::from_void_ptr(self.0);
++                Some(T::wrap_under_get_rule(reference))
++            }
++        } else {
++            None
++        }
++    }
++
++    /// Similar to [`downcast`], but consumes self and can thus avoid touching the retain count.
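++    ///
++    /// A minimal sketch (an illustrative addition to this vendored copy, not
++    /// upstream text), mirroring the `into_cftype_and_downcast_into` test below:
++    ///
++    /// ```ignore
++    /// let cf_type: CFType = CFString::from_static_string("FooBar").into_CFType();
++    /// let string = cf_type.downcast_into::<CFString>().unwrap();
++    /// assert_eq!(string.retain_count(), 1); // no extra retain was taken
++    /// ```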
++ /// ++ /// [`downcast`]: #method.downcast ++ #[inline] ++ pub fn downcast_into(self) -> Option { ++ if self.instance_of::() { ++ unsafe { ++ let reference = T::Ref::from_void_ptr(self.0); ++ mem::forget(self); ++ Some(T::wrap_under_create_rule(reference)) ++ } ++ } else { ++ None ++ } ++ } ++} ++ ++impl fmt::Debug for CFType { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ let desc = unsafe { ++ CFString::wrap_under_create_rule(CFCopyDescription(self.0)) ++ }; ++ desc.fmt(f) ++ } ++} ++ ++impl Clone for CFType { ++ #[inline] ++ fn clone(&self) -> CFType { ++ unsafe { ++ TCFType::wrap_under_get_rule(self.0) ++ } ++ } ++} ++ ++impl PartialEq for CFType { ++ #[inline] ++ fn eq(&self, other: &CFType) -> bool { ++ unsafe { ++ CFEqual(self.as_CFTypeRef(), other.as_CFTypeRef()) != 0 ++ } ++ } ++} ++ ++declare_TCFType!(CFAllocator, CFAllocatorRef); ++impl_TCFType!(CFAllocator, CFAllocatorRef, CFAllocatorGetTypeID); ++ ++impl CFAllocator { ++ #[inline] ++ pub fn new(mut context: CFAllocatorContext) -> CFAllocator { ++ unsafe { ++ let allocator_ref = CFAllocatorCreate(kCFAllocatorDefault, &mut context); ++ TCFType::wrap_under_create_rule(allocator_ref) ++ } ++ } ++} ++ ++ ++/// All Core Foundation types implement this trait. The associated type `Ref` specifies the ++/// associated Core Foundation type: e.g. for `CFType` this is `CFTypeRef`; for `CFArray` this is ++/// `CFArrayRef`. ++pub trait TCFType { ++ /// The reference type wrapped inside this type. ++ type Ref: TCFTypeRef; ++ ++ /// Returns the object as its concrete TypeRef. ++ fn as_concrete_TypeRef(&self) -> Self::Ref; ++ ++ /// Returns an instance of the object, wrapping the underlying `CFTypeRef` subclass. Use this ++ /// when following Core Foundation's "Create Rule". The reference count is *not* bumped. ++ unsafe fn wrap_under_create_rule(obj: Self::Ref) -> Self; ++ ++ /// Returns the type ID for this class. ++ fn type_id() -> CFTypeID; ++ ++ /// Returns the object as a wrapped `CFType`. The reference count is incremented by one. ++ #[inline] ++ fn as_CFType(&self) -> CFType { ++ unsafe { ++ TCFType::wrap_under_get_rule(self.as_CFTypeRef()) ++ } ++ } ++ ++ /// Returns the object as a wrapped `CFType`. Consumes self and avoids changing the reference ++ /// count. ++ #[inline] ++ fn into_CFType(self) -> CFType ++ where ++ Self: Sized, ++ { ++ let reference = self.as_CFTypeRef(); ++ mem::forget(self); ++ unsafe { TCFType::wrap_under_create_rule(reference) } ++ } ++ ++ /// Returns the object as a raw `CFTypeRef`. The reference count is not adjusted. ++ fn as_CFTypeRef(&self) -> CFTypeRef; ++ ++ /// Returns an instance of the object, wrapping the underlying `CFTypeRef` subclass. Use this ++ /// when following Core Foundation's "Get Rule". The reference count *is* bumped. ++ unsafe fn wrap_under_get_rule(reference: Self::Ref) -> Self; ++ ++ /// Returns the reference count of the object. It is unwise to do anything other than test ++ /// whether the return value of this method is greater than zero. ++ #[inline] ++ fn retain_count(&self) -> CFIndex { ++ unsafe { ++ CFGetRetainCount(self.as_CFTypeRef()) ++ } ++ } ++ ++ /// Returns the type ID of this object. ++ #[inline] ++ fn type_of(&self) -> CFTypeID { ++ unsafe { ++ CFGetTypeID(self.as_CFTypeRef()) ++ } ++ } ++ ++ /// Writes a debugging version of this object on standard error. ++ fn show(&self) { ++ unsafe { ++ CFShow(self.as_CFTypeRef()) ++ } ++ } ++ ++ /// Returns true if this value is an instance of another type. 
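++    ///
++    /// Illustrative sketch (an addition to this vendored copy, not upstream
++    /// text), mirroring the `cftype_instance_of` test below:
++    ///
++    /// ```ignore
++    /// let cftype = CFString::from_static_string("foo").as_CFType();
++    /// assert!(cftype.instance_of::<CFString>());
++    /// assert!(!cftype.instance_of::<CFBoolean>());
++    /// ```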
++    #[inline]
++    fn instance_of<OtherCFType: TCFType>(&self) -> bool {
++        self.type_of() == OtherCFType::type_id()
++    }
++}
++
++impl TCFType for CFType {
++    type Ref = CFTypeRef;
++
++    #[inline]
++    fn as_concrete_TypeRef(&self) -> CFTypeRef {
++        self.0
++    }
++
++    #[inline]
++    unsafe fn wrap_under_get_rule(reference: CFTypeRef) -> CFType {
++        let reference: CFTypeRef = CFRetain(reference);
++        TCFType::wrap_under_create_rule(reference)
++    }
++
++    #[inline]
++    fn as_CFTypeRef(&self) -> CFTypeRef {
++        self.as_concrete_TypeRef()
++    }
++
++    #[inline]
++    unsafe fn wrap_under_create_rule(obj: CFTypeRef) -> CFType {
++        CFType(obj)
++    }
++
++    #[inline]
++    fn type_id() -> CFTypeID {
++        // FIXME(pcwalton): Is this right?
++        0
++    }
++}
++
++/// A reference to an element inside a container
++pub struct ItemRef<'a, T: 'a>(ManuallyDrop<T>, PhantomData<&'a T>);
++
++impl<'a, T> Deref for ItemRef<'a, T> {
++    type Target = T;
++
++    fn deref(&self) -> &T {
++        &self.0
++    }
++}
++
++impl<'a, T: fmt::Debug> fmt::Debug for ItemRef<'a, T> {
++    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
++        self.0.fmt(f)
++    }
++}
++
++impl<'a, T: PartialEq> PartialEq for ItemRef<'a, T> {
++    fn eq(&self, other: &Self) -> bool {
++        self.0.eq(&other.0)
++    }
++}
++
++/// A trait describing how to convert from the stored *const c_void to the desired T
++pub unsafe trait FromVoid {
++    unsafe fn from_void<'a>(x: *const c_void) -> ItemRef<'a, Self> where Self: std::marker::Sized;
++}
++
++unsafe impl FromVoid for u32 {
++    unsafe fn from_void<'a>(x: *const c_void) -> ItemRef<'a, Self> {
++        // Functions like CGFontCopyTableTags treat the void*'s as u32's
++        // so we convert by casting directly
++        ItemRef(ManuallyDrop::new(x as u32), PhantomData)
++    }
++}
++
++unsafe impl FromVoid for *const c_void {
++    unsafe fn from_void<'a>(x: *const c_void) -> ItemRef<'a, Self> {
++        ItemRef(ManuallyDrop::new(x), PhantomData)
++    }
++}
++
++unsafe impl<T: TCFType> FromVoid for T {
++    unsafe fn from_void<'a>(x: *const c_void) -> ItemRef<'a, Self> {
++        ItemRef(ManuallyDrop::new(TCFType::wrap_under_create_rule(T::Ref::from_void_ptr(x))), PhantomData)
++    }
++}
++
++/// A trait describing how to convert from the stored *const c_void to the desired T
++pub unsafe trait ToVoid<T> {
++    fn to_void(&self) -> *const c_void;
++}
++
++unsafe impl ToVoid<*const c_void> for *const c_void {
++    fn to_void(&self) -> *const c_void {
++        *self
++    }
++}
++
++unsafe impl<'a> ToVoid<CFType> for &'a CFType {
++    fn to_void(&self) -> *const ::std::os::raw::c_void {
++        self.as_concrete_TypeRef().as_void_ptr()
++    }
++}
++
++unsafe impl ToVoid<CFType> for CFType {
++    fn to_void(&self) -> *const ::std::os::raw::c_void {
++        self.as_concrete_TypeRef().as_void_ptr()
++    }
++}
++
++unsafe impl ToVoid<CFType> for CFTypeRef {
++    fn to_void(&self) -> *const ::std::os::raw::c_void {
++        self.as_void_ptr()
++    }
++}
++
++
++#[cfg(test)]
++mod tests {
++    use super::*;
++    use std::mem;
++    use boolean::CFBoolean;
++
++    #[test]
++    fn cftype_instance_of() {
++        let string = CFString::from_static_string("foo");
++        let cftype = string.as_CFType();
++
++        assert!(cftype.instance_of::<CFString>());
++        assert!(!cftype.instance_of::<CFBoolean>());
++    }
++
++    #[test]
++    fn as_cftype_retain_count() {
++        let string = CFString::from_static_string("bar");
++        assert_eq!(string.retain_count(), 1);
++        let cftype = string.as_CFType();
++        assert_eq!(cftype.retain_count(), 2);
++        mem::drop(string);
++        assert_eq!(cftype.retain_count(), 1);
++    }
++
++    #[test]
++    fn into_cftype_retain_count() {
++        let string = CFString::from_static_string("bar");
++        assert_eq!(string.retain_count(), 1);
++ let cftype = string.into_CFType(); ++ assert_eq!(cftype.retain_count(), 1); ++ } ++ ++ #[test] ++ fn as_cftype_and_downcast() { ++ let string = CFString::from_static_string("bar"); ++ let cftype = string.as_CFType(); ++ let string2 = cftype.downcast::().unwrap(); ++ assert_eq!(string2.to_string(), "bar"); ++ ++ assert_eq!(string.retain_count(), 3); ++ assert_eq!(cftype.retain_count(), 3); ++ assert_eq!(string2.retain_count(), 3); ++ } ++ ++ #[test] ++ fn into_cftype_and_downcast_into() { ++ let string = CFString::from_static_string("bar"); ++ let cftype = string.into_CFType(); ++ let string2 = cftype.downcast_into::().unwrap(); ++ assert_eq!(string2.to_string(), "bar"); ++ assert_eq!(string2.retain_count(), 1); ++ } ++} diff --cc vendor/core-foundation-0.6.1/src/boolean.rs index 000000000,000000000..8c13b907d new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/boolean.rs @@@ -1,0 -1,0 +1,70 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! A Boolean type. ++ ++pub use core_foundation_sys::number::{CFBooleanRef, CFBooleanGetTypeID, kCFBooleanTrue, kCFBooleanFalse}; ++ ++use base::TCFType; ++ ++ ++declare_TCFType!{ ++ /// A Boolean type. ++ /// ++ /// FIXME(pcwalton): Should be a newtype struct, but that fails due to a Rust compiler bug. ++ CFBoolean, CFBooleanRef ++} ++impl_TCFType!(CFBoolean, CFBooleanRef, CFBooleanGetTypeID); ++impl_CFTypeDescription!(CFBoolean); ++ ++impl CFBoolean { ++ pub fn true_value() -> CFBoolean { ++ unsafe { ++ TCFType::wrap_under_get_rule(kCFBooleanTrue) ++ } ++ } ++ ++ pub fn false_value() -> CFBoolean { ++ unsafe { ++ TCFType::wrap_under_get_rule(kCFBooleanFalse) ++ } ++ } ++} ++ ++impl From for CFBoolean { ++ fn from(value: bool) -> CFBoolean { ++ if value { ++ CFBoolean::true_value() ++ } else { ++ CFBoolean::false_value() ++ } ++ } ++} ++ ++impl From for bool { ++ fn from(value: CFBoolean) -> bool { ++ value.0 == unsafe { kCFBooleanTrue } ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ ++ #[test] ++ fn to_and_from_bool() { ++ let b_false = CFBoolean::from(false); ++ let b_true = CFBoolean::from(true); ++ assert_ne!(b_false, b_true); ++ assert_eq!(b_false, CFBoolean::false_value()); ++ assert_eq!(b_true, CFBoolean::true_value()); ++ assert!(!bool::from(b_false)); ++ assert!(bool::from(b_true)); ++ } ++} diff --cc vendor/core-foundation-0.6.1/src/bundle.rs index 000000000,000000000..0e5909feb new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/bundle.rs @@@ -1,0 -1,0 +1,123 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! Core Foundation Bundle Type ++ ++pub use core_foundation_sys::bundle::*; ++use core_foundation_sys::base::kCFAllocatorDefault; ++ ++use base::{CFType, TCFType}; ++use url::CFURL; ++use dictionary::CFDictionary; ++use string::CFString; ++ ++ ++declare_TCFType!{ ++ /// A Bundle type. 
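++    ///
++    /// Illustrative sketch (an addition to this vendored copy, not upstream
++    /// text), based on the `safari_executable_url` test below:
++    ///
++    /// ```ignore
++    /// let path = CFString::from_static_string("/Applications/Safari.app");
++    /// let url = CFURL::from_file_system_path(path, kCFURLPOSIXPathStyle, true);
++    /// let bundle = CFBundle::new(url).expect("no bundle at that path");
++    /// let exe = bundle.executable_url(); // None if the bundle has no executable
++    /// ```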
++ CFBundle, CFBundleRef ++} ++impl_TCFType!(CFBundle, CFBundleRef, CFBundleGetTypeID); ++ ++impl CFBundle { ++ pub fn new(bundleURL: CFURL) -> Option { ++ unsafe { ++ let bundle_ref = CFBundleCreate(kCFAllocatorDefault, bundleURL.as_concrete_TypeRef()); ++ if bundle_ref.is_null() { ++ None ++ } else { ++ Some(TCFType::wrap_under_create_rule(bundle_ref)) ++ } ++ } ++ } ++ ++ pub fn main_bundle() -> CFBundle { ++ unsafe { ++ let bundle_ref = CFBundleGetMainBundle(); ++ TCFType::wrap_under_get_rule(bundle_ref) ++ } ++ } ++ ++ pub fn info_dictionary(&self) -> CFDictionary { ++ unsafe { ++ let info_dictionary = CFBundleGetInfoDictionary(self.0); ++ TCFType::wrap_under_get_rule(info_dictionary) ++ } ++ } ++ ++ pub fn executable_url(&self) -> Option { ++ unsafe { ++ let exe_url = CFBundleCopyExecutableURL(self.0); ++ if exe_url.is_null() { ++ None ++ } else { ++ Some(TCFType::wrap_under_create_rule(exe_url)) ++ } ++ } ++ } ++ ++ pub fn private_frameworks_url(&self) -> Option { ++ unsafe { ++ let fw_url = CFBundleCopyPrivateFrameworksURL(self.0); ++ if fw_url.is_null() { ++ None ++ } else { ++ Some(TCFType::wrap_under_create_rule(fw_url)) ++ } ++ } ++ } ++} ++ ++ ++#[test] ++fn safari_executable_url() { ++ use string::CFString; ++ use url::{CFURL, kCFURLPOSIXPathStyle}; ++ ++ let cfstr_path = CFString::from_static_string("/Applications/Safari.app"); ++ let cfurl_path = CFURL::from_file_system_path(cfstr_path, kCFURLPOSIXPathStyle, true); ++ let cfurl_executable = CFBundle::new(cfurl_path) ++ .expect("Safari not present") ++ .executable_url(); ++ assert!(cfurl_executable.is_some()); ++ assert_eq!(cfurl_executable ++ .unwrap() ++ .absolute() ++ .get_file_system_path(kCFURLPOSIXPathStyle) ++ .to_string(), ++ "/Applications/Safari.app/Contents/MacOS/Safari"); ++} ++ ++#[test] ++fn safari_private_frameworks_url() { ++ use string::CFString; ++ use url::{CFURL, kCFURLPOSIXPathStyle}; ++ ++ let cfstr_path = CFString::from_static_string("/Applications/Safari.app"); ++ let cfurl_path = CFURL::from_file_system_path(cfstr_path, kCFURLPOSIXPathStyle, true); ++ let cfurl_executable = CFBundle::new(cfurl_path) ++ .expect("Safari not present") ++ .private_frameworks_url(); ++ assert!(cfurl_executable.is_some()); ++ assert_eq!(cfurl_executable ++ .unwrap() ++ .absolute() ++ .get_file_system_path(kCFURLPOSIXPathStyle) ++ .to_string(), ++ "/Applications/Safari.app/Contents/Frameworks"); ++} ++ ++#[test] ++fn non_existant_bundle() { ++ use string::CFString; ++ use url::{CFURL, kCFURLPOSIXPathStyle}; ++ ++ let cfstr_path = CFString::from_static_string("/usr/local/foo"); ++ let cfurl_path = CFURL::from_file_system_path(cfstr_path, kCFURLPOSIXPathStyle, true); ++ assert!(CFBundle::new(cfurl_path).is_none()); ++} diff --cc vendor/core-foundation-0.6.1/src/data.rs index 000000000,000000000..2b5010a52 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/data.rs @@@ -1,0 -1,0 +1,63 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! Core Foundation byte buffers. 
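++//!
++//! Minimal usage sketch (an illustrative addition to this vendored copy, not
++//! upstream text):
++//!
++//! ```ignore
++//! let data = CFData::from_buffer(&[1, 2, 3]);
++//! assert_eq!(data.len(), 3);
++//! assert_eq!(&data[..], &[1, 2, 3]); // CFData derefs to [u8]
++//! ```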
++ ++pub use core_foundation_sys::data::*; ++use core_foundation_sys::base::CFIndex; ++use core_foundation_sys::base::{kCFAllocatorDefault}; ++use std::ops::Deref; ++use std::slice; ++ ++use base::{CFIndexConvertible, TCFType}; ++ ++ ++declare_TCFType!{ ++ /// A byte buffer. ++ CFData, CFDataRef ++} ++impl_TCFType!(CFData, CFDataRef, CFDataGetTypeID); ++impl_CFTypeDescription!(CFData); ++ ++impl CFData { ++ pub fn from_buffer(buffer: &[u8]) -> CFData { ++ unsafe { ++ let data_ref = CFDataCreate(kCFAllocatorDefault, ++ buffer.as_ptr(), ++ buffer.len().to_CFIndex()); ++ TCFType::wrap_under_create_rule(data_ref) ++ } ++ } ++ ++ /// Returns a pointer to the underlying bytes in this data. Note that this byte buffer is ++ /// read-only. ++ #[inline] ++ pub fn bytes<'a>(&'a self) -> &'a [u8] { ++ unsafe { ++ slice::from_raw_parts(CFDataGetBytePtr(self.0), self.len() as usize) ++ } ++ } ++ ++ /// Returns the length of this byte buffer. ++ #[inline] ++ pub fn len(&self) -> CFIndex { ++ unsafe { ++ CFDataGetLength(self.0) ++ } ++ } ++} ++ ++impl Deref for CFData { ++ type Target = [u8]; ++ ++ #[inline] ++ fn deref(&self) -> &[u8] { ++ self.bytes() ++ } ++} diff --cc vendor/core-foundation-0.6.1/src/date.rs index 000000000,000000000..57ee7211e new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/date.rs @@@ -1,0 -1,0 +1,130 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! Core Foundation date objects. ++ ++pub use core_foundation_sys::date::*; ++use core_foundation_sys::base::kCFAllocatorDefault; ++ ++use base::TCFType; ++ ++#[cfg(feature = "with-chrono")] ++use chrono::NaiveDateTime; ++ ++ ++declare_TCFType!{ ++ /// A date. 
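++    ///
++    /// Illustrative sketch (an addition to this vendored copy, not upstream
++    /// text), mirroring the `date_comparison` test below:
++    ///
++    /// ```ignore
++    /// let now = CFDate::now();
++    /// let past = CFDate::new(now.abs_time() - 1.0);
++    /// assert!(past < now); // ordering comes from impl_CFComparison!/CFDateCompare
++    /// ```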
++ CFDate, CFDateRef ++} ++impl_TCFType!(CFDate, CFDateRef, CFDateGetTypeID); ++impl_CFTypeDescription!(CFDate); ++impl_CFComparison!(CFDate, CFDateCompare); ++ ++impl CFDate { ++ #[inline] ++ pub fn new(time: CFAbsoluteTime) -> CFDate { ++ unsafe { ++ let date_ref = CFDateCreate(kCFAllocatorDefault, time); ++ TCFType::wrap_under_create_rule(date_ref) ++ } ++ } ++ ++ #[inline] ++ pub fn now() -> CFDate { ++ CFDate::new(unsafe { CFAbsoluteTimeGetCurrent() }) ++ } ++ ++ #[inline] ++ pub fn abs_time(&self) -> CFAbsoluteTime { ++ unsafe { ++ CFDateGetAbsoluteTime(self.0) ++ } ++ } ++ ++ #[cfg(feature = "with-chrono")] ++ pub fn naive_utc(&self) -> NaiveDateTime { ++ let ts = unsafe { ++ self.abs_time() + kCFAbsoluteTimeIntervalSince1970 ++ }; ++ let (secs, nanos) = if ts.is_sign_positive() { ++ (ts.trunc() as i64, ts.fract()) ++ } else { ++ // nanoseconds can't be negative in NaiveDateTime ++ (ts.trunc() as i64 - 1, 1.0 - ts.fract().abs()) ++ }; ++ NaiveDateTime::from_timestamp(secs, (nanos * 1e9).floor() as u32) ++ } ++ ++ #[cfg(feature = "with-chrono")] ++ pub fn from_naive_utc(time: NaiveDateTime) -> CFDate { ++ let secs = time.timestamp(); ++ let nanos = time.timestamp_subsec_nanos(); ++ let ts = unsafe { ++ secs as f64 + (nanos as f64 / 1e9) - kCFAbsoluteTimeIntervalSince1970 ++ }; ++ CFDate::new(ts) ++ } ++} ++ ++#[cfg(test)] ++mod test { ++ use super::CFDate; ++ use std::cmp::Ordering; ++ ++ #[cfg(feature = "with-chrono")] ++ use chrono::NaiveDateTime; ++ ++ #[cfg(feature = "with-chrono")] ++ fn approx_eq(a: f64, b: f64) -> bool { ++ use std::f64; ++ ++ let same_sign = a.is_sign_positive() == b.is_sign_positive(); ++ let equal = ((a - b).abs() / f64::min(a.abs() + b.abs(), f64::MAX)) < f64::EPSILON; ++ (same_sign && equal) ++ } ++ ++ #[test] ++ fn date_comparison() { ++ let now = CFDate::now(); ++ let past = CFDate::new(now.abs_time() - 1.0); ++ assert_eq!(now.cmp(&past), Ordering::Greater); ++ assert_eq!(now.cmp(&now), Ordering::Equal); ++ assert_eq!(past.cmp(&now), Ordering::Less); ++ } ++ ++ #[test] ++ fn date_equality() { ++ let now = CFDate::now(); ++ let same_time = CFDate::new(now.abs_time()); ++ assert_eq!(now, same_time); ++ } ++ ++ #[test] ++ #[cfg(feature = "with-chrono")] ++ fn date_chrono_conversion_positive() { ++ let date = CFDate::now(); ++ let datetime = date.naive_utc(); ++ let converted = CFDate::from_naive_utc(datetime); ++ assert!(approx_eq(date.abs_time(), converted.abs_time())); ++ } ++ ++ #[test] ++ #[cfg(feature = "with-chrono")] ++ fn date_chrono_conversion_negative() { ++ use super::kCFAbsoluteTimeIntervalSince1970; ++ ++ let ts = unsafe { ++ kCFAbsoluteTimeIntervalSince1970 - 420.0 ++ }; ++ let date = CFDate::new(ts); ++ let datetime: NaiveDateTime = date.naive_utc(); ++ let converted = CFDate::from_naive_utc(datetime); ++ assert!(approx_eq(date.abs_time(), converted.abs_time())); ++ } ++} diff --cc vendor/core-foundation-0.6.1/src/dictionary.rs index 000000000,000000000..10b9158bd new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/dictionary.rs @@@ -1,0 -1,0 +1,316 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! Dictionaries of key-value pairs. 
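++//!
++//! Minimal usage sketch (an illustrative addition to this vendored copy, not
++//! upstream text), in the style of the tests below:
++//!
++//! ```ignore
++//! let dict = CFDictionary::from_CFType_pairs(&[
++//!     (CFString::from_static_string("answer"), CFNumber::from(42)),
++//! ]);
++//! assert!(dict.contains_key(&CFString::from_static_string("answer")));
++//! ```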
++
++pub use core_foundation_sys::dictionary::*;
++
++use core_foundation_sys::base::{CFTypeRef, CFRelease, kCFAllocatorDefault};
++use std::mem;
++use std::os::raw::c_void;
++use std::ptr;
++use std::marker::PhantomData;
++
++use base::{ItemRef, FromVoid, ToVoid};
++use base::{CFIndexConvertible, TCFType};
++
++// consume the type parameters with PhantomDatas
++pub struct CFDictionary<K = *const c_void, V = *const c_void>(CFDictionaryRef, PhantomData<K>, PhantomData<V>);
++
++impl<K, V> Drop for CFDictionary<K, V> {
++    fn drop(&mut self) {
++        unsafe { CFRelease(self.as_CFTypeRef()) }
++    }
++}
++
++impl_TCFType!(CFDictionary<K, V>, CFDictionaryRef, CFDictionaryGetTypeID);
++impl_CFTypeDescription!(CFDictionary<K, V>);
++
++impl<K, V> CFDictionary<K, V> {
++    pub fn from_CFType_pairs(pairs: &[(K, V)]) -> CFDictionary<K, V> where K: TCFType, V: TCFType {
++        let (keys, values): (Vec<CFTypeRef>, Vec<CFTypeRef>) = pairs
++            .iter()
++            .map(|&(ref key, ref value)| (key.as_CFTypeRef(), value.as_CFTypeRef()))
++            .unzip();
++
++        unsafe {
++            let dictionary_ref = CFDictionaryCreate(kCFAllocatorDefault,
++                                                    mem::transmute(keys.as_ptr()),
++                                                    mem::transmute(values.as_ptr()),
++                                                    keys.len().to_CFIndex(),
++                                                    &kCFTypeDictionaryKeyCallBacks,
++                                                    &kCFTypeDictionaryValueCallBacks);
++            TCFType::wrap_under_create_rule(dictionary_ref)
++        }
++    }
++
++    #[inline]
++    pub fn len(&self) -> usize {
++        unsafe {
++            CFDictionaryGetCount(self.0) as usize
++        }
++    }
++
++    #[inline]
++    pub fn is_empty(&self) -> bool {
++        self.len() == 0
++    }
++
++    #[inline]
++    pub fn contains_key(&self, key: &K) -> bool where K: ToVoid<K> {
++        unsafe { CFDictionaryContainsKey(self.0, key.to_void()) != 0 }
++    }
++
++    #[inline]
++    pub fn find<'a, T: ToVoid<K>>(&'a self, key: T) -> Option<ItemRef<'a, V>> where V: FromVoid, K: ToVoid<K> {
++        unsafe {
++            let mut value: *const c_void = ptr::null();
++            if CFDictionaryGetValueIfPresent(self.0, key.to_void(), &mut value) != 0 {
++                Some(V::from_void(value))
++            } else {
++                None
++            }
++        }
++    }
++
++    /// # Panics
++    ///
++    /// Panics if the key is not present in the dictionary. Use `find` to get an `Option` instead
++    /// of panicking.
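++    ///
++    /// Illustrative sketch (an addition to this vendored copy, not upstream
++    /// text; `dict` and `key` are hypothetical): prefer `find` when the key
++    /// may be absent.
++    ///
++    /// ```ignore
++    /// let value = dict.get(&key);   // panics if `key` is missing
++    /// let value = dict.find(&key);  // returns an Option instead
++    /// ```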
++    #[inline]
++    pub fn get<'a, T: ToVoid<K>>(&'a self, key: T) -> ItemRef<'a, V> where V: FromVoid, K: ToVoid<K> {
++        let ptr = key.to_void();
++        self.find(key).expect(&format!("No entry found for key {:p}", ptr))
++    }
++
++    pub fn get_keys_and_values(&self) -> (Vec<*const c_void>, Vec<*const c_void>) {
++        let length = self.len();
++        let mut keys = Vec::with_capacity(length);
++        let mut values = Vec::with_capacity(length);
++
++        unsafe {
++            CFDictionaryGetKeysAndValues(self.0, keys.as_mut_ptr(), values.as_mut_ptr());
++            keys.set_len(length);
++            values.set_len(length);
++        }
++
++        (keys, values)
++    }
++}
++
++// consume the type parameters with PhantomDatas
++pub struct CFMutableDictionary<K = *const c_void, V = *const c_void>(CFMutableDictionaryRef, PhantomData<K>, PhantomData<V>);
++
++impl<K, V> Drop for CFMutableDictionary<K, V> {
++    fn drop(&mut self) {
++        unsafe { CFRelease(self.as_CFTypeRef()) }
++    }
++}
++
++impl_TCFType!(CFMutableDictionary<K, V>, CFMutableDictionaryRef, CFDictionaryGetTypeID);
++impl_CFTypeDescription!(CFMutableDictionary<K, V>);
++
++impl<K, V> CFMutableDictionary<K, V> {
++    pub fn new() -> Self {
++        Self::with_capacity(0)
++    }
++
++    pub fn with_capacity(capacity: isize) -> Self {
++        unsafe {
++            let dictionary_ref = CFDictionaryCreateMutable(kCFAllocatorDefault,
++                                                           capacity as _,
++                                                           &kCFTypeDictionaryKeyCallBacks,
++                                                           &kCFTypeDictionaryValueCallBacks);
++            TCFType::wrap_under_create_rule(dictionary_ref)
++        }
++    }
++
++    pub fn copy_with_capacity(&self, capacity: isize) -> Self {
++        unsafe {
++            let dictionary_ref = CFDictionaryCreateMutableCopy(kCFAllocatorDefault, capacity as _, self.0);
++            TCFType::wrap_under_get_rule(dictionary_ref)
++        }
++    }
++
++    pub fn from_CFType_pairs(pairs: &[(K, V)]) -> CFMutableDictionary<K, V> where K: ToVoid<K>, V: ToVoid<V> {
++        let mut result = Self::with_capacity(pairs.len() as _);
++        for &(ref key, ref value) in pairs {
++            result.add(key, value);
++        }
++        result
++    }
++
++    // Immutable interface
++
++    #[inline]
++    pub fn len(&self) -> usize {
++        unsafe {
++            CFDictionaryGetCount(self.0) as usize
++        }
++    }
++
++    #[inline]
++    pub fn is_empty(&self) -> bool {
++        self.len() == 0
++    }
++
++    #[inline]
++    pub fn contains_key(&self, key: *const c_void) -> bool {
++        unsafe {
++            CFDictionaryContainsKey(self.0, key) != 0
++        }
++    }
++
++    #[inline]
++    pub fn find<'a>(&'a self, key: &K) -> Option<ItemRef<'a, V>> where V: FromVoid, K: ToVoid<K> {
++        unsafe {
++            let mut value: *const c_void = ptr::null();
++            if CFDictionaryGetValueIfPresent(self.0, key.to_void(), &mut value) != 0 {
++                Some(V::from_void(value))
++            } else {
++                None
++            }
++        }
++    }
++
++    /// # Panics
++    ///
++    /// Panics if the key is not present in the dictionary. Use `find` to get an `Option` instead
++    /// of panicking.
++    #[inline]
++    pub fn get<'a>(&'a self, key: &K) -> ItemRef<'a, V> where V: FromVoid, K: ToVoid<K> {
++        let ptr = key.to_void();
++        self.find(&key).expect(&format!("No entry found for key {:p}", ptr))
++    }
++
++    pub fn get_keys_and_values(&self) -> (Vec<*const c_void>, Vec<*const c_void>) {
++        let length = self.len();
++        let mut keys = Vec::with_capacity(length);
++        let mut values = Vec::with_capacity(length);
++
++        unsafe {
++            CFDictionaryGetKeysAndValues(self.0, keys.as_mut_ptr(), values.as_mut_ptr());
++            keys.set_len(length);
++            values.set_len(length);
++        }
++
++        (keys, values)
++    }
++
++    // Mutable interface
++
++    /// Adds the key-value pair to the dictionary if no such key already exists.
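++    ///
++    /// Illustrative sketch (an addition to this vendored copy, not upstream
++    /// text; `d`, `key` and `value` are hypothetical) of how the three
++    /// mutators differ:
++    ///
++    /// ```ignore
++    /// d.add(&key, &value);    // no-op if `key` is already present
++    /// d.set(key, value);      // inserts or overwrites
++    /// d.replace(key, value);  // overwrites only an existing key
++    /// ```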
++ #[inline] ++ pub fn add(&mut self, key: &K, value: &V) where K: ToVoid, V: ToVoid { ++ unsafe { CFDictionaryAddValue(self.0, key.to_void(), value.to_void()) } ++ } ++ ++ /// Sets the value of the key in the dictionary. ++ #[inline] ++ pub fn set(&mut self, key: K, value: V) where K: ToVoid, V: ToVoid { ++ unsafe { CFDictionarySetValue(self.0, key.to_void(), value.to_void()) } ++ } ++ ++ /// Replaces the value of the key in the dictionary. ++ #[inline] ++ pub fn replace(&mut self, key: K, value: V) where K: ToVoid, V: ToVoid { ++ unsafe { CFDictionaryReplaceValue(self.0, key.to_void(), value.to_void()) } ++ } ++ ++ /// Removes the value of the key from the dictionary. ++ #[inline] ++ pub fn remove(&mut self, key: K) where K: ToVoid { ++ unsafe { CFDictionaryRemoveValue(self.0, key.to_void()) } ++ } ++ ++ #[inline] ++ pub fn remove_all(&mut self) { ++ unsafe { CFDictionaryRemoveAllValues(self.0) } ++ } ++} ++ ++ ++#[cfg(test)] ++pub mod test { ++ use super::*; ++ use base::{CFType, TCFType}; ++ use boolean::CFBoolean; ++ use number::CFNumber; ++ use string::CFString; ++ ++ ++ #[test] ++ fn dictionary() { ++ let bar = CFString::from_static_string("Bar"); ++ let baz = CFString::from_static_string("Baz"); ++ let boo = CFString::from_static_string("Boo"); ++ let foo = CFString::from_static_string("Foo"); ++ let tru = CFBoolean::true_value(); ++ let n42 = CFNumber::from(42); ++ ++ let d = CFDictionary::from_CFType_pairs(&[ ++ (bar.as_CFType(), boo.as_CFType()), ++ (baz.as_CFType(), tru.as_CFType()), ++ (foo.as_CFType(), n42.as_CFType()), ++ ]); ++ ++ let (v1, v2) = d.get_keys_and_values(); ++ assert!(v1 == &[bar.as_CFTypeRef(), baz.as_CFTypeRef(), foo.as_CFTypeRef()]); ++ assert!(v2 == &[boo.as_CFTypeRef(), tru.as_CFTypeRef(), n42.as_CFTypeRef()]); ++ } ++ ++ #[test] ++ fn mutable_dictionary() { ++ let bar = CFString::from_static_string("Bar"); ++ let baz = CFString::from_static_string("Baz"); ++ let boo = CFString::from_static_string("Boo"); ++ let foo = CFString::from_static_string("Foo"); ++ let tru = CFBoolean::true_value(); ++ let n42 = CFNumber::from(42); ++ ++ let mut d = CFMutableDictionary::::new(); ++ d.add(&bar, &boo.as_CFType()); ++ d.add(&baz, &tru.as_CFType()); ++ d.add(&foo, &n42.as_CFType()); ++ assert_eq!(d.len(), 3); ++ ++ let (v1, v2) = d.get_keys_and_values(); ++ assert!(v1 == &[bar.as_CFTypeRef(), baz.as_CFTypeRef(), foo.as_CFTypeRef()]); ++ assert!(v2 == &[boo.as_CFTypeRef(), tru.as_CFTypeRef(), n42.as_CFTypeRef()]); ++ ++ d.remove(baz); ++ assert_eq!(d.len(), 2); ++ ++ let (v1, v2) = d.get_keys_and_values(); ++ assert!(v1 == &[bar.as_CFTypeRef(), foo.as_CFTypeRef()]); ++ assert!(v2 == &[boo.as_CFTypeRef(), n42.as_CFTypeRef()]); ++ ++ d.remove_all(); ++ assert_eq!(d.len(), 0) ++ } ++ ++ #[test] ++ fn dict_find_and_contains_key() { ++ let dict = CFDictionary::from_CFType_pairs(&[ ++ ( ++ CFString::from_static_string("hello"), ++ CFBoolean::true_value(), ++ ), ++ ]); ++ let key = CFString::from_static_string("hello"); ++ let invalid_key = CFString::from_static_string("foobar"); ++ ++ assert!(dict.contains_key(&key)); ++ assert!(!dict.contains_key(&invalid_key)); ++ ++ let value = dict.find(&key).unwrap().clone(); ++ assert_eq!(value, CFBoolean::true_value()); ++ assert_eq!(dict.find(&invalid_key), None); ++ } ++} diff --cc vendor/core-foundation-0.6.1/src/error.rs index 000000000,000000000..f100171bc new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/error.rs @@@ -1,0 -1,0 +1,71 @@@ ++// Copyright 2016 The Servo Project Developers. 
See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! Core Foundation errors. ++ ++pub use core_foundation_sys::error::*; ++ ++use std::error::Error; ++use std::fmt; ++ ++use base::{CFIndex, TCFType}; ++use string::CFString; ++ ++ ++declare_TCFType!{ ++ /// An error value. ++ CFError, CFErrorRef ++} ++impl_TCFType!(CFError, CFErrorRef, CFErrorGetTypeID); ++ ++impl fmt::Debug for CFError { ++ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { ++ fmt.debug_struct("CFError") ++ .field("domain", &self.domain()) ++ .field("code", &self.code()) ++ .field("description", &self.description()) ++ .finish() ++ } ++} ++ ++impl fmt::Display for CFError { ++ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { ++ write!(fmt, "{}", self.description()) ++ } ++} ++ ++impl Error for CFError { ++ fn description(&self) -> &str { ++ "a Core Foundation error" ++ } ++} ++ ++impl CFError { ++ /// Returns a string identifying the domain with which this error is ++ /// associated. ++ pub fn domain(&self) -> CFString { ++ unsafe { ++ let s = CFErrorGetDomain(self.0); ++ CFString::wrap_under_get_rule(s) ++ } ++ } ++ ++ /// Returns the code identifying this type of error. ++ pub fn code(&self) -> CFIndex { ++ unsafe { CFErrorGetCode(self.0) } ++ } ++ ++ /// Returns a human-presentable description of the error. ++ pub fn description(&self) -> CFString { ++ unsafe { ++ let s = CFErrorCopyDescription(self.0); ++ CFString::wrap_under_create_rule(s) ++ } ++ } ++} diff --cc vendor/core-foundation-0.6.1/src/filedescriptor.rs index 000000000,000000000..2c999b519 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/filedescriptor.rs @@@ -1,0 -1,0 +1,210 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
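++
++//! Core Foundation file-descriptor run-loop integration.
++//!
++//! Minimal flow sketch (an illustrative addition to this vendored copy, not
++//! upstream text), following the `test_callback` test below; `raw_fd` is an
++//! open `RawFd` and `callout` a `CFFileDescriptorCallBack` (both assumed):
++//!
++//! ```ignore
++//! let cf_fd = CFFileDescriptor::new(raw_fd, true, callout, None).unwrap();
++//! cf_fd.enable_callbacks(kCFFileDescriptorReadCallBack);
++//! let source = cf_fd.to_run_loop_source(0).unwrap();
++//! // adding `source` to a CFRunLoop delivers readiness events to `callout`
++//! ```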
++ ++pub use core_foundation_sys::filedescriptor::*; ++ ++use core_foundation_sys::base::{Boolean, CFIndex}; ++use core_foundation_sys::base::{kCFAllocatorDefault, CFOptionFlags}; ++ ++use base::TCFType; ++use runloop::CFRunLoopSource; ++ ++use std::mem; ++use std::os::unix::io::{AsRawFd, RawFd}; ++use std::ptr; ++ ++declare_TCFType!{ ++ CFFileDescriptor, CFFileDescriptorRef ++} ++impl_TCFType!(CFFileDescriptor, CFFileDescriptorRef, CFFileDescriptorGetTypeID); ++ ++impl CFFileDescriptor { ++ pub fn new(fd: RawFd, ++ closeOnInvalidate: bool, ++ callout: CFFileDescriptorCallBack, ++ context: Option<&CFFileDescriptorContext>) -> Option { ++ let context = context.map_or(ptr::null(), |c| c as *const _); ++ unsafe { ++ let fd_ref = CFFileDescriptorCreate(kCFAllocatorDefault, ++ fd, ++ closeOnInvalidate as Boolean, ++ callout, ++ context); ++ if fd_ref.is_null() { ++ None ++ } else { ++ Some(TCFType::wrap_under_create_rule(fd_ref)) ++ } ++ } ++ } ++ ++ pub fn context(&self) -> CFFileDescriptorContext { ++ unsafe { ++ let mut context: CFFileDescriptorContext = mem::uninitialized(); ++ CFFileDescriptorGetContext(self.0, &mut context); ++ context ++ } ++ } ++ ++ pub fn enable_callbacks(&self, callback_types: CFOptionFlags) { ++ unsafe { ++ CFFileDescriptorEnableCallBacks(self.0, callback_types) ++ } ++ } ++ ++ pub fn disable_callbacks(&self, callback_types: CFOptionFlags) { ++ unsafe { ++ CFFileDescriptorDisableCallBacks(self.0, callback_types) ++ } ++ } ++ ++ pub fn valid(&self) -> bool { ++ unsafe { ++ CFFileDescriptorIsValid(self.0) != 0 ++ } ++ } ++ ++ pub fn invalidate(&self) { ++ unsafe { ++ CFFileDescriptorInvalidate(self.0) ++ } ++ } ++ ++ pub fn to_run_loop_source(&self, order: CFIndex) -> Option { ++ unsafe { ++ let source_ref = CFFileDescriptorCreateRunLoopSource( ++ kCFAllocatorDefault, ++ self.0, ++ order ++ ); ++ if source_ref.is_null() { ++ None ++ } else { ++ Some(TCFType::wrap_under_create_rule(source_ref)) ++ } ++ } ++ } ++} ++ ++impl AsRawFd for CFFileDescriptor { ++ fn as_raw_fd(&self) -> RawFd { ++ unsafe { ++ CFFileDescriptorGetNativeDescriptor(self.0) ++ } ++ } ++} ++ ++ ++#[cfg(test)] ++mod test { ++ extern crate libc; ++ ++ use super::*; ++ use std::ffi::CString; ++ use std::os::raw::c_void; ++ use core_foundation_sys::base::{CFOptionFlags}; ++ use core_foundation_sys::runloop::{kCFRunLoopDefaultMode}; ++ use libc::O_RDWR; ++ use runloop::{CFRunLoop}; ++ ++ #[test] ++ fn test_consumed() { ++ let path = CString::new("/dev/null").unwrap(); ++ let raw_fd = unsafe { libc::open(path.as_ptr(), O_RDWR, 0) }; ++ let cf_fd = CFFileDescriptor::new(raw_fd, true, never_callback, None); ++ assert!(cf_fd.is_some()); ++ let cf_fd = cf_fd.unwrap(); ++ ++ assert!(cf_fd.valid()); ++ cf_fd.invalidate(); ++ assert!(!cf_fd.valid()); ++ ++ // close() should fail ++ assert_eq!(unsafe { libc::close(raw_fd) }, -1); ++ } ++ ++ #[test] ++ fn test_unconsumed() { ++ let path = CString::new("/dev/null").unwrap(); ++ let raw_fd = unsafe { libc::open(path.as_ptr(), O_RDWR, 0) }; ++ let cf_fd = CFFileDescriptor::new(raw_fd, false, never_callback, None); ++ assert!(cf_fd.is_some()); ++ let cf_fd = cf_fd.unwrap(); ++ ++ assert!(cf_fd.valid()); ++ cf_fd.invalidate(); ++ assert!(!cf_fd.valid()); ++ ++ // close() should succeed ++ assert_eq!(unsafe { libc::close(raw_fd) }, 0); ++ } ++ ++ extern "C" fn never_callback(_f: CFFileDescriptorRef, ++ _callback_types: CFOptionFlags, ++ _info_ptr: *mut c_void) { ++ unreachable!(); ++ } ++ ++ struct TestInfo { ++ value: CFOptionFlags ++ } ++ ++ #[test] ++ fn 
test_callback() { ++ let mut info = TestInfo { value: 0 }; ++ let context = CFFileDescriptorContext { ++ version: 0, ++ info: &mut info as *mut _ as *mut c_void, ++ retain: None, ++ release: None, ++ copyDescription: None ++ }; ++ ++ let path = CString::new("/dev/null").unwrap(); ++ let raw_fd = unsafe { libc::open(path.as_ptr(), O_RDWR, 0) }; ++ let cf_fd = CFFileDescriptor::new(raw_fd, true, callback, Some(&context)); ++ assert!(cf_fd.is_some()); ++ let cf_fd = cf_fd.unwrap(); ++ ++ assert!(cf_fd.valid()); ++ ++ let run_loop = CFRunLoop::get_current(); ++ let source = CFRunLoopSource::from_file_descriptor(&cf_fd, 0); ++ assert!(source.is_some()); ++ unsafe { ++ run_loop.add_source(&source.unwrap(), kCFRunLoopDefaultMode); ++ } ++ ++ info.value = 0; ++ cf_fd.enable_callbacks(kCFFileDescriptorReadCallBack); ++ CFRunLoop::run_current(); ++ assert_eq!(info.value, kCFFileDescriptorReadCallBack); ++ ++ info.value = 0; ++ cf_fd.enable_callbacks(kCFFileDescriptorWriteCallBack); ++ CFRunLoop::run_current(); ++ assert_eq!(info.value, kCFFileDescriptorWriteCallBack); ++ ++ info.value = 0; ++ cf_fd.disable_callbacks(kCFFileDescriptorReadCallBack | kCFFileDescriptorWriteCallBack); ++ ++ cf_fd.invalidate(); ++ assert!(!cf_fd.valid()); ++ } ++ ++ extern "C" fn callback(_f: CFFileDescriptorRef, callback_types: CFOptionFlags, info_ptr: *mut c_void) { ++ assert!(!info_ptr.is_null()); ++ ++ let info: *mut TestInfo = info_ptr as *mut TestInfo; ++ ++ unsafe { (*info).value = callback_types }; ++ ++ CFRunLoop::get_current().stop(); ++ } ++} diff --cc vendor/core-foundation-0.6.1/src/lib.rs index 000000000,000000000..02ad18833 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/lib.rs @@@ -1,0 -1,0 +1,181 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++#![allow(non_snake_case)] ++ ++extern crate core_foundation_sys; ++extern crate libc; ++ ++#[cfg(feature = "with-chrono")] ++extern crate chrono; ++ ++use base::TCFType; ++ ++pub unsafe trait ConcreteCFType: TCFType {} ++ ++#[macro_export] ++macro_rules! declare_TCFType { ++ ( ++ $(#[$doc:meta])* ++ $ty:ident, $raw:ident ++ ) => { ++ $(#[$doc])* ++ pub struct $ty($raw); ++ ++ impl Drop for $ty { ++ fn drop(&mut self) { ++ unsafe { $crate::base::CFRelease(self.as_CFTypeRef()) } ++ } ++ } ++ } ++} ++ ++#[macro_export] ++macro_rules! 
impl_TCFType { ++ ($ty:ident, $ty_ref:ident, $ty_id:ident) => { ++ impl_TCFType!($ty<>, $ty_ref, $ty_id); ++ unsafe impl $crate::ConcreteCFType for $ty { } ++ }; ++ ++ ($ty:ident<$($p:ident $(: $bound:path)*),*>, $ty_ref:ident, $ty_id:ident) => { ++ impl<$($p $(: $bound)*),*> $crate::base::TCFType for $ty<$($p),*> { ++ type Ref = $ty_ref; ++ ++ #[inline] ++ fn as_concrete_TypeRef(&self) -> $ty_ref { ++ self.0 ++ } ++ ++ #[inline] ++ unsafe fn wrap_under_get_rule(reference: $ty_ref) -> Self { ++ use std::mem; ++ let reference = mem::transmute($crate::base::CFRetain(mem::transmute(reference))); ++ $crate::base::TCFType::wrap_under_create_rule(reference) ++ } ++ ++ #[inline] ++ fn as_CFTypeRef(&self) -> $crate::base::CFTypeRef { ++ unsafe { ++ ::std::mem::transmute(self.as_concrete_TypeRef()) ++ } ++ } ++ ++ #[inline] ++ unsafe fn wrap_under_create_rule(reference: $ty_ref) -> Self { ++ // we need one PhantomData for each type parameter so call ourselves ++ // again with @Phantom $p to produce that ++ $ty(reference $(, impl_TCFType!(@Phantom $p))*) ++ } ++ ++ #[inline] ++ fn type_id() -> $crate::base::CFTypeID { ++ unsafe { ++ $ty_id() ++ } ++ } ++ } ++ ++ impl Clone for $ty { ++ #[inline] ++ fn clone(&self) -> $ty { ++ unsafe { ++ $ty::wrap_under_get_rule(self.0) ++ } ++ } ++ } ++ ++ impl PartialEq for $ty { ++ #[inline] ++ fn eq(&self, other: &$ty) -> bool { ++ self.as_CFType().eq(&other.as_CFType()) ++ } ++ } ++ ++ impl Eq for $ty { } ++ ++ unsafe impl<'a> $crate::base::ToVoid<$ty> for &'a $ty { ++ fn to_void(&self) -> *const ::std::os::raw::c_void { ++ use $crate::base::TCFTypeRef; ++ self.as_concrete_TypeRef().as_void_ptr() ++ } ++ } ++ ++ unsafe impl $crate::base::ToVoid<$ty> for $ty { ++ fn to_void(&self) -> *const ::std::os::raw::c_void { ++ use $crate::base::TCFTypeRef; ++ self.as_concrete_TypeRef().as_void_ptr() ++ } ++ } ++ ++ unsafe impl $crate::base::ToVoid<$ty> for $ty_ref { ++ fn to_void(&self) -> *const ::std::os::raw::c_void { ++ use $crate::base::TCFTypeRef; ++ self.as_void_ptr() ++ } ++ } ++ ++ }; ++ ++ (@Phantom $x:ident) => { ::std::marker::PhantomData }; ++} ++ ++ ++#[macro_export] ++macro_rules! impl_CFTypeDescription { ++ ($ty:ident) => { ++ // it's fine to use an empty <> list ++ impl_CFTypeDescription!($ty<>); ++ }; ++ ($ty:ident<$($p:ident $(: $bound:path)*),*>) => { ++ impl<$($p $(: $bound)*),*> ::std::fmt::Debug for $ty<$($p),*> { ++ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ++ self.as_CFType().fmt(f) ++ } ++ } ++ } ++} ++ ++#[macro_export] ++macro_rules! 
impl_CFComparison {
++    ($ty:ident, $compare:ident) => {
++        impl PartialOrd for $ty {
++            #[inline]
++            fn partial_cmp(&self, other: &$ty) -> Option<::std::cmp::Ordering> {
++                unsafe {
++                    Some($compare(self.as_concrete_TypeRef(), other.as_concrete_TypeRef(), ::std::ptr::null_mut()).into())
++                }
++            }
++        }
++
++        impl Ord for $ty {
++            #[inline]
++            fn cmp(&self, other: &$ty) -> ::std::cmp::Ordering {
++                self.partial_cmp(other).unwrap()
++            }
++        }
++    }
++}
++
++pub mod array;
++pub mod attributed_string;
++pub mod base;
++pub mod boolean;
++pub mod data;
++pub mod date;
++pub mod dictionary;
++pub mod error;
++pub mod filedescriptor;
++pub mod number;
++pub mod set;
++pub mod string;
++pub mod url;
++pub mod bundle;
++pub mod propertylist;
++pub mod runloop;
++pub mod timezone;
++pub mod uuid;
diff --cc vendor/core-foundation-0.6.1/src/number.rs
index 000000000,000000000..96ec312f1
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-0.6.1/src/number.rs
@@@ -1,0 -1,0 +1,110 @@@
++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++//! Immutable numbers.
++
++use core_foundation_sys::base::kCFAllocatorDefault;
++pub use core_foundation_sys::number::*;
++use std::mem;
++
++use base::TCFType;
++
++
++declare_TCFType!{
++    /// An immutable numeric value.
++    CFNumber, CFNumberRef
++}
++impl_TCFType!(CFNumber, CFNumberRef, CFNumberGetTypeID);
++impl_CFTypeDescription!(CFNumber);
++impl_CFComparison!(CFNumber, CFNumberCompare);
++
++impl CFNumber {
++    #[inline]
++    pub fn to_i64(&self) -> Option<i64> {
++        unsafe {
++            let mut value: i64 = 0;
++            let ok = CFNumberGetValue(self.0, kCFNumberSInt64Type, mem::transmute(&mut value));
++            if ok { Some(value) } else { None }
++        }
++    }
++
++    #[inline]
++    pub fn to_f32(&self) -> Option<f32> {
++        unsafe {
++            let mut value: f32 = 0.0;
++            let ok = CFNumberGetValue(self.0, kCFNumberFloat32Type, mem::transmute(&mut value));
++            if ok { Some(value) } else { None }
++        }
++    }
++
++    #[inline]
++    pub fn to_f64(&self) -> Option<f64> {
++        unsafe {
++            let mut value: f64 = 0.0;
++            let ok = CFNumberGetValue(self.0, kCFNumberFloat64Type, mem::transmute(&mut value));
++            if ok { Some(value) } else { None }
++        }
++    }
++}
++
++impl From<i32> for CFNumber {
++    #[inline]
++    fn from(value: i32) -> Self {
++        unsafe {
++            let number_ref = CFNumberCreate(
++                kCFAllocatorDefault,
++                kCFNumberSInt32Type,
++                mem::transmute(&value),
++            );
++            TCFType::wrap_under_create_rule(number_ref)
++        }
++    }
++}
++
++impl From<i64> for CFNumber {
++    #[inline]
++    fn from(value: i64) -> Self {
++        unsafe {
++            let number_ref = CFNumberCreate(
++                kCFAllocatorDefault,
++                kCFNumberSInt64Type,
++                mem::transmute(&value),
++            );
++            TCFType::wrap_under_create_rule(number_ref)
++        }
++    }
++}
++
++impl From<f32> for CFNumber {
++    #[inline]
++    fn from(value: f32) -> Self {
++        unsafe {
++            let number_ref = CFNumberCreate(
++                kCFAllocatorDefault,
++                kCFNumberFloat32Type,
++                mem::transmute(&value),
++            );
++            TCFType::wrap_under_create_rule(number_ref)
++        }
++    }
++}
++
++impl From<f64> for CFNumber {
++    #[inline]
++    fn from(value: f64) -> Self {
++        unsafe {
++            let number_ref = CFNumberCreate(
++                kCFAllocatorDefault,
++                kCFNumberFloat64Type,
++                mem::transmute(&value),
++            );
++            TCFType::wrap_under_create_rule(number_ref)
++        }
++    }
++}
diff --cc vendor/core-foundation-0.6.1/src/propertylist.rs
index 000000000,000000000..9a2703b2a
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-0.6.1/src/propertylist.rs
@@@ -1,0 -1,0 +1,325 @@@
++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++//! Core Foundation property lists
++
++use std::ptr;
++use std::mem;
++use std::os::raw::c_void;
++
++use error::CFError;
++use data::CFData;
++use base::{CFType, TCFType, TCFTypeRef};
++
++pub use core_foundation_sys::propertylist::*;
++use core_foundation_sys::error::CFErrorRef;
++use core_foundation_sys::base::{CFGetRetainCount, CFGetTypeID, CFIndex, CFRetain,
++                                CFShow, CFTypeID, kCFAllocatorDefault};
++
++pub fn create_with_data(data: CFData,
++                        options: CFPropertyListMutabilityOptions)
++                        -> Result<(*const c_void, CFPropertyListFormat), CFError> {
++    unsafe {
++        let mut error: CFErrorRef = ptr::null_mut();
++        let mut format: CFPropertyListFormat = 0;
++        let property_list = CFPropertyListCreateWithData(kCFAllocatorDefault,
++                                                         data.as_concrete_TypeRef(),
++                                                         options,
++                                                         &mut format,
++                                                         &mut error);
++        if property_list.is_null() {
++            Err(TCFType::wrap_under_create_rule(error))
++        } else {
++            Ok((property_list, format))
++        }
++    }
++}
++
++pub fn create_data(property_list: *const c_void, format: CFPropertyListFormat) -> Result<CFData, CFError> {
++    unsafe {
++        let mut error: CFErrorRef = ptr::null_mut();
++        let data_ref = CFPropertyListCreateData(kCFAllocatorDefault,
++                                                property_list,
++                                                format,
++                                                0,
++                                                &mut error);
++        if data_ref.is_null() {
++            Err(TCFType::wrap_under_create_rule(error))
++        } else {
++            Ok(TCFType::wrap_under_create_rule(data_ref))
++        }
++    }
++}
++
++
++/// Trait for all subclasses of [`CFPropertyList`].
++///
++/// [`CFPropertyList`]: struct.CFPropertyList.html
++pub trait CFPropertyListSubClass: TCFType {
++    /// Create an instance of the superclass type [`CFPropertyList`] for this instance.
++    ///
++    /// [`CFPropertyList`]: struct.CFPropertyList.html
++    #[inline]
++    fn to_CFPropertyList(&self) -> CFPropertyList {
++        unsafe { CFPropertyList::wrap_under_get_rule(self.as_concrete_TypeRef().as_void_ptr()) }
++    }
++
++    /// Equal to [`to_CFPropertyList`], but consumes self and avoids changing the reference count.
++    ///
++    /// [`to_CFPropertyList`]: #method.to_CFPropertyList
++    #[inline]
++    fn into_CFPropertyList(self) -> CFPropertyList
++    where
++        Self: Sized,
++    {
++        let reference = self.as_concrete_TypeRef().as_void_ptr();
++        mem::forget(self);
++        unsafe { CFPropertyList::wrap_under_create_rule(reference) }
++    }
++}
++
++impl CFPropertyListSubClass for ::data::CFData {}
++impl CFPropertyListSubClass for ::string::CFString {}
++impl CFPropertyListSubClass for ::array::CFArray {}
++impl CFPropertyListSubClass for ::dictionary::CFDictionary {}
++impl CFPropertyListSubClass for ::date::CFDate {}
++impl CFPropertyListSubClass for ::boolean::CFBoolean {}
++impl CFPropertyListSubClass for ::number::CFNumber {}
++
++
++declare_TCFType!{
++    /// A CFPropertyList struct. This is superclass to [`CFData`], [`CFString`], [`CFArray`],
++    /// [`CFDictionary`], [`CFDate`], [`CFBoolean`], and [`CFNumber`].
++    ///
++    /// This superclass type does not have its own `CFTypeID`, instead each instance has the `CFTypeID`
++    /// of the subclass it is an instance of. Thus, this type cannot implement the [`TCFType`] trait,
++    /// since it cannot implement the static [`TCFType::type_id()`] method.
++    ///
++    /// [`CFData`]: ../data/struct.CFData.html
++    /// [`CFString`]: ../string/struct.CFString.html
++    /// [`CFArray`]: ../array/struct.CFArray.html
++    /// [`CFDictionary`]: ../dictionary/struct.CFDictionary.html
++    /// [`CFDate`]: ../date/struct.CFDate.html
++    /// [`CFBoolean`]: ../boolean/struct.CFBoolean.html
++    /// [`CFNumber`]: ../number/struct.CFNumber.html
++    /// [`TCFType`]: ../base/trait.TCFType.html
++    /// [`TCFType::type_id()`]: ../base/trait.TCFType.html#method.type_of
++    CFPropertyList, CFPropertyListRef
++}
++
++impl CFPropertyList {
++    #[inline]
++    pub fn as_concrete_TypeRef(&self) -> CFPropertyListRef {
++        self.0
++    }
++
++    #[inline]
++    pub unsafe fn wrap_under_get_rule(reference: CFPropertyListRef) -> CFPropertyList {
++        let reference = mem::transmute(CFRetain(mem::transmute(reference)));
++        CFPropertyList(reference)
++    }
++
++    #[inline]
++    pub fn as_CFType(&self) -> CFType {
++        unsafe { CFType::wrap_under_get_rule(self.as_CFTypeRef()) }
++    }
++
++    #[inline]
++    pub fn into_CFType(self) -> CFType
++    where
++        Self: Sized,
++    {
++        let reference = self.as_CFTypeRef();
++        mem::forget(self);
++        unsafe { TCFType::wrap_under_create_rule(reference) }
++    }
++
++    #[inline]
++    pub fn as_CFTypeRef(&self) -> ::core_foundation_sys::base::CFTypeRef {
++        unsafe { mem::transmute(self.as_concrete_TypeRef()) }
++    }
++
++    #[inline]
++    pub unsafe fn wrap_under_create_rule(obj: CFPropertyListRef) -> CFPropertyList {
++        CFPropertyList(obj)
++    }
++
++    /// Returns the reference count of the object. It is unwise to do anything other than test
++    /// whether the return value of this method is greater than zero.
++    #[inline]
++    pub fn retain_count(&self) -> CFIndex {
++        unsafe { CFGetRetainCount(self.as_CFTypeRef()) }
++    }
++
++    /// Returns the type ID of this object. Will be one of CFData, CFString, CFArray, CFDictionary,
++    /// CFDate, CFBoolean, or CFNumber.
++    #[inline]
++    pub fn type_of(&self) -> CFTypeID {
++        unsafe { CFGetTypeID(self.as_CFTypeRef()) }
++    }
++
++    /// Writes a debugging version of this object on standard error.
++    pub fn show(&self) {
++        unsafe { CFShow(self.as_CFTypeRef()) }
++    }
++
++    /// Returns true if this value is an instance of another type.
++    #[inline]
++    pub fn instance_of<OtherCFType: TCFType>(&self) -> bool {
++        self.type_of() == OtherCFType::type_id()
++    }
++}
++
++impl Clone for CFPropertyList {
++    #[inline]
++    fn clone(&self) -> CFPropertyList {
++        unsafe { CFPropertyList::wrap_under_get_rule(self.0) }
++    }
++}
++
++impl PartialEq for CFPropertyList {
++    #[inline]
++    fn eq(&self, other: &CFPropertyList) -> bool {
++        self.as_CFType().eq(&other.as_CFType())
++    }
++}
++
++impl Eq for CFPropertyList {}
++
++impl CFPropertyList {
++    /// Try to downcast the [`CFPropertyList`] to a subclass. Checking if the instance is the
++    /// correct subclass happens at runtime and `None` is returned if it is not the correct type.
++    /// Works similar to [`Box::downcast`] and [`CFType::downcast`].
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// # use core_foundation::string::CFString;
++    /// # use core_foundation::propertylist::{CFPropertyList, CFPropertyListSubClass};
++    /// #
++    /// // Create a string.
++    /// let string: CFString = CFString::from_static_string("FooBar");
++    /// // Cast it up to a property list.
++    /// let propertylist: CFPropertyList = string.to_CFPropertyList();
++    /// // Cast it down again.
++    /// assert!(propertylist.downcast::<CFString>().unwrap().to_string() == "FooBar");
++    /// ```
++    ///
++    /// [`CFPropertyList`]: struct.CFPropertyList.html
++    /// [`Box::downcast`]: https://doc.rust-lang.org/std/boxed/struct.Box.html#method.downcast
++    pub fn downcast<T: CFPropertyListSubClass>(&self) -> Option<T> {
++        if self.instance_of::<T>() {
++            unsafe {
++                let subclass_ref = T::Ref::from_void_ptr(self.0);
++                Some(T::wrap_under_get_rule(subclass_ref))
++            }
++        } else {
++            None
++        }
++    }
++
++    /// Similar to [`downcast`], but consumes self and can thus avoid touching the retain count.
++    ///
++    /// [`downcast`]: #method.downcast
++    pub fn downcast_into<T: CFPropertyListSubClass>(self) -> Option<T> {
++        if self.instance_of::<T>() {
++            unsafe {
++                let subclass_ref = T::Ref::from_void_ptr(self.0);
++                mem::forget(self);
++                Some(T::wrap_under_create_rule(subclass_ref))
++            }
++        } else {
++            None
++        }
++    }
++}
++
++
++
++#[cfg(test)]
++pub mod test {
++    use super::*;
++    use string::CFString;
++    use boolean::CFBoolean;
++
++    #[test]
++    fn test_property_list_serialization() {
++        use base::{TCFType, CFEqual};
++        use boolean::CFBoolean;
++        use number::CFNumber;
++        use dictionary::CFDictionary;
++        use string::CFString;
++        use super::*;
++
++        let bar = CFString::from_static_string("Bar");
++        let baz = CFString::from_static_string("Baz");
++        let boo = CFString::from_static_string("Boo");
++        let foo = CFString::from_static_string("Foo");
++        let tru = CFBoolean::true_value();
++        let n42 = CFNumber::from(42);
++
++        let dict1 = CFDictionary::from_CFType_pairs(&[(bar.as_CFType(), boo.as_CFType()),
++                                                      (baz.as_CFType(), tru.as_CFType()),
++                                                      (foo.as_CFType(), n42.as_CFType())]);
++
++        let data = create_data(dict1.as_CFTypeRef(), kCFPropertyListXMLFormat_v1_0).unwrap();
++        let (dict2, _) = create_with_data(data, kCFPropertyListImmutable).unwrap();
++        unsafe {
++            assert!(CFEqual(dict1.as_CFTypeRef(), dict2) == 1);
++        }
++    }
++
++    #[test]
++    fn to_propertylist_retain_count() {
++        let string = CFString::from_static_string("Bar");
++        assert_eq!(string.retain_count(), 1);
++
++        let propertylist = string.to_CFPropertyList();
++        assert_eq!(string.retain_count(), 2);
++        assert_eq!(propertylist.retain_count(), 2);
++
++        mem::drop(string);
++        assert_eq!(propertylist.retain_count(), 1);
++    }
++
++    #[test]
++    fn downcast_string() {
++        let propertylist = CFString::from_static_string("Bar").to_CFPropertyList();
++        assert!(propertylist.downcast::<CFString>().unwrap().to_string() == "Bar");
++        assert!(propertylist.downcast::<CFBoolean>().is_none());
++    }
++
++    #[test]
++    fn downcast_boolean() {
++        let propertylist = CFBoolean::true_value().to_CFPropertyList();
++        assert!(propertylist.downcast::<CFBoolean>().is_some());
++        assert!(propertylist.downcast::<CFString>().is_none());
++    }
++
++    #[test]
++    fn downcast_into_fail() {
++        let string = CFString::from_static_string("Bar");
++        let propertylist = string.to_CFPropertyList();
++        assert_eq!(string.retain_count(), 2);
++
++        assert!(propertylist.downcast_into::<CFBoolean>().is_none());
++        assert_eq!(string.retain_count(), 1);
++    }
++
++    #[test]
++    fn downcast_into() {
++        let string = CFString::from_static_string("Bar");
++        let propertylist = string.to_CFPropertyList();
++        assert_eq!(string.retain_count(), 2);
++
++        let string2 = propertylist.downcast_into::<CFString>().unwrap();
++        assert!(string2.to_string() == "Bar");
++        assert_eq!(string2.retain_count(), 2);
++    }
++}
diff --cc vendor/core-foundation-0.6.1/src/runloop.rs
index 000000000,000000000..24aa9a5c6
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-0.6.1/src/runloop.rs
@@@ -1,0 -1,0 +1,199 @@@
++// Copyright 2013 The Servo Project
Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++#![allow(non_upper_case_globals)] ++ ++pub use core_foundation_sys::runloop::*; ++use core_foundation_sys::base::CFIndex; ++use core_foundation_sys::base::{kCFAllocatorDefault, CFOptionFlags}; ++use core_foundation_sys::string::CFStringRef; ++ ++use base::{TCFType}; ++use date::{CFAbsoluteTime, CFTimeInterval}; ++use filedescriptor::CFFileDescriptor; ++use string::{CFString}; ++ ++pub type CFRunLoopMode = CFStringRef; ++ ++ ++declare_TCFType!(CFRunLoop, CFRunLoopRef); ++impl_TCFType!(CFRunLoop, CFRunLoopRef, CFRunLoopGetTypeID); ++impl_CFTypeDescription!(CFRunLoop); ++ ++impl CFRunLoop { ++ pub fn get_current() -> CFRunLoop { ++ unsafe { ++ let run_loop_ref = CFRunLoopGetCurrent(); ++ TCFType::wrap_under_get_rule(run_loop_ref) ++ } ++ } ++ ++ pub fn get_main() -> CFRunLoop { ++ unsafe { ++ let run_loop_ref = CFRunLoopGetMain(); ++ TCFType::wrap_under_get_rule(run_loop_ref) ++ } ++ } ++ ++ pub fn run_current() { ++ unsafe { ++ CFRunLoopRun(); ++ } ++ } ++ ++ pub fn stop(&self) { ++ unsafe { ++ CFRunLoopStop(self.0); ++ } ++ } ++ ++ pub fn current_mode(&self) -> Option { ++ unsafe { ++ let string_ref = CFRunLoopCopyCurrentMode(self.0); ++ if string_ref.is_null() { ++ return None; ++ } ++ ++ let cf_string: CFString = TCFType::wrap_under_create_rule(string_ref); ++ Some(cf_string.to_string()) ++ } ++ } ++ ++ pub fn contains_timer(&self, timer: &CFRunLoopTimer, mode: CFRunLoopMode) -> bool { ++ unsafe { ++ CFRunLoopContainsTimer(self.0, timer.0, mode) != 0 ++ } ++ } ++ ++ pub fn add_timer(&self, timer: &CFRunLoopTimer, mode: CFRunLoopMode) { ++ unsafe { ++ CFRunLoopAddTimer(self.0, timer.0, mode); ++ } ++ } ++ ++ pub fn remove_timer(&self, timer: &CFRunLoopTimer, mode: CFRunLoopMode) { ++ unsafe { ++ CFRunLoopRemoveTimer(self.0, timer.0, mode); ++ } ++ } ++ ++ pub fn contains_source(&self, source: &CFRunLoopSource, mode: CFRunLoopMode) -> bool { ++ unsafe { ++ CFRunLoopContainsSource(self.0, source.0, mode) != 0 ++ } ++ } ++ ++ pub fn add_source(&self, source: &CFRunLoopSource, mode: CFRunLoopMode) { ++ unsafe { ++ CFRunLoopAddSource(self.0, source.0, mode); ++ } ++ } ++ ++ pub fn remove_source(&self, source: &CFRunLoopSource, mode: CFRunLoopMode) { ++ unsafe { ++ CFRunLoopRemoveSource(self.0, source.0, mode); ++ } ++ } ++ ++ pub fn contains_observer(&self, observer: &CFRunLoopObserver, mode: CFRunLoopMode) -> bool { ++ unsafe { ++ CFRunLoopContainsObserver(self.0, observer.0, mode) != 0 ++ } ++ } ++ ++ pub fn add_observer(&self, observer: &CFRunLoopObserver, mode: CFRunLoopMode) { ++ unsafe { ++ CFRunLoopAddObserver(self.0, observer.0, mode); ++ } ++ } ++ ++ pub fn remove_observer(&self, observer: &CFRunLoopObserver, mode: CFRunLoopMode) { ++ unsafe { ++ CFRunLoopRemoveObserver(self.0, observer.0, mode); ++ } ++ } ++ ++} ++ ++ ++declare_TCFType!(CFRunLoopTimer, CFRunLoopTimerRef); ++impl_TCFType!(CFRunLoopTimer, CFRunLoopTimerRef, CFRunLoopTimerGetTypeID); ++ ++impl CFRunLoopTimer { ++ pub fn new(fireDate: CFAbsoluteTime, interval: CFTimeInterval, flags: CFOptionFlags, order: CFIndex, callout: CFRunLoopTimerCallBack, context: *mut CFRunLoopTimerContext) -> CFRunLoopTimer { ++ unsafe { ++ let timer_ref = CFRunLoopTimerCreate(kCFAllocatorDefault, fireDate, interval, flags, order, callout, context); ++ 
TCFType::wrap_under_create_rule(timer_ref)
++        }
++    }
++}
++
++
++declare_TCFType!(CFRunLoopSource, CFRunLoopSourceRef);
++impl_TCFType!(CFRunLoopSource, CFRunLoopSourceRef, CFRunLoopSourceGetTypeID);
++
++impl CFRunLoopSource {
++    pub fn from_file_descriptor(fd: &CFFileDescriptor, order: CFIndex) -> Option<CFRunLoopSource> {
++        fd.to_run_loop_source(order)
++    }
++}
++
++declare_TCFType!(CFRunLoopObserver, CFRunLoopObserverRef);
++impl_TCFType!(CFRunLoopObserver, CFRunLoopObserverRef, CFRunLoopObserverGetTypeID);
++
++#[cfg(test)]
++mod test {
++    use super::*;
++    use date::{CFDate, CFAbsoluteTime};
++    use std::mem;
++    use std::os::raw::c_void;
++    use std::sync::mpsc;
++
++    #[test]
++    fn wait_200_milliseconds() {
++        let run_loop = CFRunLoop::get_current();
++
++        let now = CFDate::now().abs_time();
++        let (elapsed_tx, elapsed_rx) = mpsc::channel();
++        let mut info = Info {
++            start_time: now,
++            elapsed_tx,
++        };
++        let mut context = unsafe { CFRunLoopTimerContext {
++            version: 0,
++            info: &mut info as *mut _ as *mut c_void,
++            retain: mem::zeroed(),
++            release: mem::zeroed(),
++            copyDescription: mem::zeroed(),
++        } };
++
++
++        let run_loop_timer = CFRunLoopTimer::new(now + 0.20f64, 0f64, 0, 0, timer_popped, &mut context);
++        unsafe {
++            run_loop.add_timer(&run_loop_timer, kCFRunLoopDefaultMode);
++        }
++        CFRunLoop::run_current();
++        let elapsed = elapsed_rx.try_recv().unwrap();
++        println!("wait_200_milliseconds, elapsed: {}", elapsed);
++        assert!(elapsed > 0.19 && elapsed < 0.30);
++    }
++
++    struct Info {
++        start_time: CFAbsoluteTime,
++        elapsed_tx: mpsc::Sender<CFAbsoluteTime>,
++    }
++
++    extern "C" fn timer_popped(_timer: CFRunLoopTimerRef, raw_info: *mut c_void) {
++        let info: *mut Info = unsafe { mem::transmute(raw_info) };
++        let now = CFDate::now().abs_time();
++        let elapsed = now - unsafe { (*info).start_time };
++        let _ = unsafe { (*info).elapsed_tx.send(elapsed) };
++        CFRunLoop::get_current().stop();
++    }
++}
diff --cc vendor/core-foundation-0.6.1/src/set.rs
index 000000000,000000000..6ef9f38c2
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-0.6.1/src/set.rs
@@@ -1,0 -1,0 +1,45 @@@
++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++//! An immutable bag of elements.
++
++pub use core_foundation_sys::set::*;
++use core_foundation_sys::base::{CFTypeRef, CFRelease, kCFAllocatorDefault};
++
++use base::{CFIndexConvertible, TCFType};
++
++use std::mem;
++use std::os::raw::c_void;
++use std::marker::PhantomData;
++
++/// An immutable bag of elements.
++pub struct CFSet<T = *const c_void>(CFSetRef, PhantomData<T>);
++
++impl<T> Drop for CFSet<T> {
++    fn drop(&mut self) {
++        unsafe { CFRelease(self.as_CFTypeRef()) }
++    }
++}
++
++impl_TCFType!(CFSet<T>, CFSetRef, CFSetGetTypeID);
++impl_CFTypeDescription!(CFSet);
++
++impl CFSet {
++    /// Creates a new set from a list of `CFType` instances.
++    pub fn from_slice<T>(elems: &[T]) -> CFSet where T: TCFType {
++        unsafe {
++            let elems: Vec<CFTypeRef> = elems.iter().map(|elem| elem.as_CFTypeRef()).collect();
++            let set_ref = CFSetCreate(kCFAllocatorDefault,
++                                      mem::transmute(elems.as_ptr()),
++                                      elems.len().to_CFIndex(),
++                                      &kCFTypeSetCallBacks);
++            TCFType::wrap_under_create_rule(set_ref)
++        }
++    }
++}
diff --cc vendor/core-foundation-0.6.1/src/string.rs
index 000000000,000000000..4dc0a7201
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-0.6.1/src/string.rs
@@@ -1,0 -1,0 +1,150 @@@
++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++//! Immutable strings.
++
++pub use core_foundation_sys::string::*;
++
++use base::{CFIndexConvertible, TCFType};
++
++use core_foundation_sys::base::{Boolean, CFIndex, CFRange};
++use core_foundation_sys::base::{kCFAllocatorDefault, kCFAllocatorNull};
++use std::borrow::Cow;
++use std::fmt;
++use std::str::{self, FromStr};
++use std::ptr;
++use std::ffi::CStr;
++
++
++declare_TCFType!{
++    /// An immutable string in one of a variety of encodings.
++    CFString, CFStringRef
++}
++impl_TCFType!(CFString, CFStringRef, CFStringGetTypeID);
++
++impl FromStr for CFString {
++    type Err = ();
++
++    /// See also CFString::new for a variant of this which does not return a Result
++    #[inline]
++    fn from_str(string: &str) -> Result<Self, Self::Err> {
++        Ok(CFString::new(string))
++    }
++}
++
++impl<'a> From<&'a str> for CFString {
++    #[inline]
++    fn from(string: &'a str) -> CFString {
++        CFString::new(string)
++    }
++}
++
++impl<'a> From<&'a CFString> for Cow<'a, str> {
++    fn from(cf_str: &'a CFString) -> Cow<'a, str> {
++        unsafe {
++            // Do this without allocating if we can get away with it
++            let c_string = CFStringGetCStringPtr(cf_str.0, kCFStringEncodingUTF8);
++            if c_string != ptr::null() {
++                let c_str = CStr::from_ptr(c_string);
++                Cow::Borrowed(str::from_utf8_unchecked(c_str.to_bytes()))
++            } else {
++                let char_len = cf_str.char_len();
++
++                // First, ask how big the buffer ought to be.
++                let mut bytes_required: CFIndex = 0;
++                CFStringGetBytes(cf_str.0,
++                                 CFRange { location: 0, length: char_len },
++                                 kCFStringEncodingUTF8,
++                                 0,
++                                 false as Boolean,
++                                 ptr::null_mut(),
++                                 0,
++                                 &mut bytes_required);
++
++                // Then, allocate the buffer and actually copy.
++                let mut buffer = vec![b'\x00'; bytes_required as usize];
++
++                let mut bytes_used: CFIndex = 0;
++                let chars_written = CFStringGetBytes(cf_str.0,
++                                                     CFRange { location: 0, length: char_len },
++                                                     kCFStringEncodingUTF8,
++                                                     0,
++                                                     false as Boolean,
++                                                     buffer.as_mut_ptr(),
++                                                     buffer.len().to_CFIndex(),
++                                                     &mut bytes_used);
++                assert!(chars_written == char_len);
++
++                // This is dangerous; we over-allocate and null-terminate the string (during
++                // initialization).
++                assert!(bytes_used == buffer.len().to_CFIndex());
++                Cow::Owned(String::from_utf8_unchecked(buffer))
++            }
++        }
++    }
++}
++
++impl fmt::Display for CFString {
++    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
++        fmt.write_str(&Cow::from(self))
++    }
++}
++
++impl fmt::Debug for CFString {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        write!(f, "\"{}\"", self)
++    }
++}
++
++
++impl CFString {
++    /// Creates a new `CFString` instance from a Rust string.
++ #[inline] ++ pub fn new(string: &str) -> CFString { ++ unsafe { ++ let string_ref = CFStringCreateWithBytes(kCFAllocatorDefault, ++ string.as_ptr(), ++ string.len().to_CFIndex(), ++ kCFStringEncodingUTF8, ++ false as Boolean); ++ CFString::wrap_under_create_rule(string_ref) ++ } ++ } ++ ++ /// Like `CFString::new`, but references a string that can be used as a backing store ++ /// by virtue of being statically allocated. ++ #[inline] ++ pub fn from_static_string(string: &'static str) -> CFString { ++ unsafe { ++ let string_ref = CFStringCreateWithBytesNoCopy(kCFAllocatorDefault, ++ string.as_ptr(), ++ string.len().to_CFIndex(), ++ kCFStringEncodingUTF8, ++ false as Boolean, ++ kCFAllocatorNull); ++ TCFType::wrap_under_create_rule(string_ref) ++ } ++ } ++ ++ /// Returns the number of characters in the string. ++ #[inline] ++ pub fn char_len(&self) -> CFIndex { ++ unsafe { ++ CFStringGetLength(self.0) ++ } ++ } ++} ++ ++#[test] ++fn string_and_back() { ++ let original = "The quick brown fox jumped over the slow lazy dog."; ++ let cfstr = CFString::from_static_string(original); ++ let converted = cfstr.to_string(); ++ assert!(converted == original); ++} diff --cc vendor/core-foundation-0.6.1/src/timezone.rs index 000000000,000000000..66aadb77a new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/timezone.rs @@@ -1,0 -1,0 +1,95 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! Core Foundation time zone objects. ++ ++pub use core_foundation_sys::timezone::*; ++use core_foundation_sys::base::kCFAllocatorDefault; ++ ++use base::TCFType; ++use date::{CFDate, CFTimeInterval}; ++ ++#[cfg(feature = "with-chrono")] ++use chrono::{FixedOffset, NaiveDateTime}; ++ ++ ++declare_TCFType!{ ++ /// A time zone. 
++ CFTimeZone, CFTimeZoneRef ++} ++impl_TCFType!(CFTimeZone, CFTimeZoneRef, CFTimeZoneGetTypeID); ++impl_CFTypeDescription!(CFTimeZone); ++ ++impl Default for CFTimeZone { ++ fn default() -> CFTimeZone { ++ unsafe { ++ let tz_ref = CFTimeZoneCopyDefault(); ++ TCFType::wrap_under_create_rule(tz_ref) ++ } ++ } ++} ++ ++impl CFTimeZone { ++ #[inline] ++ pub fn new(interval: CFTimeInterval) -> CFTimeZone { ++ unsafe { ++ let tz_ref = CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, interval); ++ TCFType::wrap_under_create_rule(tz_ref) ++ } ++ } ++ ++ #[inline] ++ pub fn system() -> CFTimeZone { ++ unsafe { ++ let tz_ref = CFTimeZoneCopySystem(); ++ TCFType::wrap_under_create_rule(tz_ref) ++ } ++ } ++ ++ pub fn seconds_from_gmt(&self, date: CFDate) -> CFTimeInterval { ++ unsafe { ++ CFTimeZoneGetSecondsFromGMT(self.0, date.abs_time()) ++ } ++ } ++ ++ #[cfg(feature = "with-chrono")] ++ pub fn offset_at_date(&self, date: NaiveDateTime) -> FixedOffset { ++ let date = CFDate::from_naive_utc(date); ++ FixedOffset::east(self.seconds_from_gmt(date) as i32) ++ } ++ ++ #[cfg(feature = "with-chrono")] ++ pub fn from_offset(offset: FixedOffset) -> CFTimeZone { ++ CFTimeZone::new(offset.local_minus_utc() as f64) ++ } ++} ++ ++#[cfg(test)] ++mod test { ++ use super::CFTimeZone; ++ ++ #[cfg(feature = "with-chrono")] ++ use chrono::{NaiveDateTime, FixedOffset}; ++ ++ #[test] ++ fn timezone_comparison() { ++ let system = CFTimeZone::system(); ++ let default = CFTimeZone::default(); ++ assert_eq!(system, default); ++ } ++ ++ #[test] ++ #[cfg(feature = "with-chrono")] ++ fn timezone_chrono_conversion() { ++ let offset = FixedOffset::west(28800); ++ let tz = CFTimeZone::from_offset(offset); ++ let converted = tz.offset_at_date(NaiveDateTime::from_timestamp(0, 0)); ++ assert_eq!(offset, converted); ++ } ++} diff --cc vendor/core-foundation-0.6.1/src/url.rs index 000000000,000000000..edf1de88b new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/url.rs @@@ -1,0 -1,0 +1,156 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! A URL type for Core Foundation. 
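++
++// [Editorial illustration -- not part of the upstream crate. A minimal sketch
++// of the round trip implemented below: wrap a filesystem path in a CFURL via
++// `from_path`, then recover it via `to_path` (Unix-only), assuming a macOS
++// test host where the CoreFoundation framework is available.]
++#[cfg(all(test, unix))]
++mod editorial_path_round_trip {
++    use super::CFURL;
++    use std::path::Path;
++
++    #[test]
++    fn round_trip() {
++        // `from_path` returns None if CFURLCreateFromFileSystemRepresentation fails.
++        let url = CFURL::from_path(Path::new("/usr/local"), true).unwrap();
++        assert_eq!(url.to_path().unwrap(), Path::new("/usr/local"));
++    }
++}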
++
++pub use core_foundation_sys::url::*;
++
++use base::{TCFType, CFIndex};
++use string::{CFString};
++
++use core_foundation_sys::base::{kCFAllocatorDefault, Boolean};
++use std::fmt;
++use std::ptr;
++use std::path::{Path, PathBuf};
++use std::mem;
++
++use libc::{strlen, PATH_MAX};
++
++#[cfg(unix)]
++use std::os::unix::ffi::OsStrExt;
++#[cfg(unix)]
++use std::ffi::OsStr;
++
++
++declare_TCFType!(CFURL, CFURLRef);
++impl_TCFType!(CFURL, CFURLRef, CFURLGetTypeID);
++
++impl fmt::Debug for CFURL {
++    #[inline]
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        unsafe {
++            let string: CFString = TCFType::wrap_under_get_rule(CFURLGetString(self.0));
++            write!(f, "{}", string.to_string())
++        }
++    }
++}
++
++impl CFURL {
++    pub fn from_path<P: AsRef<Path>>(path: P, isDirectory: bool) -> Option<CFURL> {
++        let path_bytes;
++        #[cfg(unix)]
++        {
++            path_bytes = path.as_ref().as_os_str().as_bytes()
++        }
++        #[cfg(not(unix))]
++        {
++            // XXX: Getting non-valid UTF8 paths into CoreFoundation on Windows is going to be unpleasant
++            // CFURLGetWideFileSystemRepresentation might help
++            path_bytes = match path.as_ref().to_str() {
++                Some(path) => path,
++                None => return None,
++            }
++        }
++
++        unsafe {
++            let url_ref = CFURLCreateFromFileSystemRepresentation(ptr::null_mut(), path_bytes.as_ptr(), path_bytes.len() as CFIndex, isDirectory as u8);
++            if url_ref.is_null() {
++                return None;
++            }
++            Some(TCFType::wrap_under_create_rule(url_ref))
++        }
++    }
++
++    pub fn from_file_system_path(filePath: CFString, pathStyle: CFURLPathStyle, isDirectory: bool) -> CFURL {
++        unsafe {
++            let url_ref = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, filePath.as_concrete_TypeRef(), pathStyle, isDirectory as u8);
++            TCFType::wrap_under_create_rule(url_ref)
++        }
++    }
++
++    #[cfg(unix)]
++    pub fn to_path(&self) -> Option<PathBuf> {
++        // implementing this on Windows is more complicated because of the different OsStr representation
++        unsafe {
++            let mut buf: [u8; PATH_MAX as usize] = mem::uninitialized();
++            let result = CFURLGetFileSystemRepresentation(self.0, true as Boolean, buf.as_mut_ptr(), buf.len() as CFIndex);
++            if result == false as Boolean {
++                return None;
++            }
++            let len = strlen(buf.as_ptr() as *const i8);
++            let path = OsStr::from_bytes(&buf[0..len]);
++            Some(PathBuf::from(path))
++        }
++    }
++
++    pub fn get_string(&self) -> CFString {
++        unsafe {
++            TCFType::wrap_under_get_rule(CFURLGetString(self.0))
++        }
++    }
++
++    pub fn get_file_system_path(&self, pathStyle: CFURLPathStyle) -> CFString {
++        unsafe {
++            TCFType::wrap_under_create_rule(CFURLCopyFileSystemPath(self.as_concrete_TypeRef(), pathStyle))
++        }
++    }
++
++    pub fn absolute(&self) -> CFURL {
++        unsafe {
++            TCFType::wrap_under_create_rule(CFURLCopyAbsoluteURL(self.as_concrete_TypeRef()))
++        }
++    }
++}
++
++#[test]
++fn file_url_from_path() {
++    let path = "/usr/local/foo/";
++    let cfstr_path = CFString::from_static_string(path);
++    let cfurl = CFURL::from_file_system_path(cfstr_path, kCFURLPOSIXPathStyle, true);
++    assert_eq!(cfurl.get_string().to_string(), "file:///usr/local/foo/");
++}
++
++#[cfg(unix)]
++#[test]
++fn non_utf8() {
++    use std::ffi::OsStr;
++    let path = Path::new(OsStr::from_bytes(b"/\xC0/blame"));
++    let cfurl = CFURL::from_path(path, false).unwrap();
++    assert_eq!(cfurl.to_path().unwrap(), path);
++    let len = unsafe { CFURLGetBytes(cfurl.as_concrete_TypeRef(), ptr::null_mut(), 0) };
++    assert_eq!(len, 17);
++}
++
++#[test]
++fn absolute_file_url() {
++    use core_foundation_sys::url::CFURLCreateWithFileSystemPathRelativeToBase;
++    use std::path::PathBuf;
++ ++ let path = "/usr/local/foo"; ++ let file = "bar"; ++ ++ let cfstr_path = CFString::from_static_string(path); ++ let cfstr_file = CFString::from_static_string(file); ++ let cfurl_base = CFURL::from_file_system_path(cfstr_path, kCFURLPOSIXPathStyle, true); ++ let cfurl_relative: CFURL = unsafe { ++ let url_ref = CFURLCreateWithFileSystemPathRelativeToBase(kCFAllocatorDefault, ++ cfstr_file.as_concrete_TypeRef(), ++ kCFURLPOSIXPathStyle, ++ false as u8, ++ cfurl_base.as_concrete_TypeRef()); ++ TCFType::wrap_under_create_rule(url_ref) ++ }; ++ ++ let mut absolute_path = PathBuf::from(path); ++ absolute_path.push(file); ++ ++ assert_eq!(cfurl_relative.get_file_system_path(kCFURLPOSIXPathStyle).to_string(), file); ++ assert_eq!(cfurl_relative.absolute().get_file_system_path(kCFURLPOSIXPathStyle).to_string(), ++ absolute_path.to_str().unwrap()); ++} diff --cc vendor/core-foundation-0.6.1/src/uuid.rs index 000000000,000000000..80613370c new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/src/uuid.rs @@@ -1,0 -1,0 +1,112 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! Core Foundation UUID objects. ++ ++#[cfg(feature = "with-uuid")] ++extern crate uuid; ++ ++pub use core_foundation_sys::uuid::*; ++use core_foundation_sys::base::kCFAllocatorDefault; ++ ++use base::TCFType; ++ ++#[cfg(feature = "with-uuid")] ++use self::uuid::Uuid; ++ ++ ++declare_TCFType! { ++ /// A UUID. ++ CFUUID, CFUUIDRef ++} ++impl_TCFType!(CFUUID, CFUUIDRef, CFUUIDGetTypeID); ++impl_CFTypeDescription!(CFUUID); ++ ++impl CFUUID { ++ #[inline] ++ pub fn new() -> CFUUID { ++ unsafe { ++ let uuid_ref = CFUUIDCreate(kCFAllocatorDefault); ++ TCFType::wrap_under_create_rule(uuid_ref) ++ } ++ } ++} ++ ++#[cfg(feature = "with-uuid")] ++impl Into for CFUUID { ++ fn into(self) -> Uuid { ++ let b = unsafe { ++ CFUUIDGetUUIDBytes(self.0) ++ }; ++ let bytes = [ ++ b.byte0, ++ b.byte1, ++ b.byte2, ++ b.byte3, ++ b.byte4, ++ b.byte5, ++ b.byte6, ++ b.byte7, ++ b.byte8, ++ b.byte9, ++ b.byte10, ++ b.byte11, ++ b.byte12, ++ b.byte13, ++ b.byte14, ++ b.byte15, ++ ]; ++ Uuid::from_bytes(&bytes).unwrap() ++ } ++} ++ ++#[cfg(feature = "with-uuid")] ++impl From for CFUUID { ++ fn from(uuid: Uuid) -> CFUUID { ++ let b = uuid.as_bytes(); ++ let bytes = CFUUIDBytes { ++ byte0: b[0], ++ byte1: b[1], ++ byte2: b[2], ++ byte3: b[3], ++ byte4: b[4], ++ byte5: b[5], ++ byte6: b[6], ++ byte7: b[7], ++ byte8: b[8], ++ byte9: b[9], ++ byte10: b[10], ++ byte11: b[11], ++ byte12: b[12], ++ byte13: b[13], ++ byte14: b[14], ++ byte15: b[15], ++ }; ++ unsafe { ++ let uuid_ref = CFUUIDCreateFromUUIDBytes(kCFAllocatorDefault, bytes); ++ TCFType::wrap_under_create_rule(uuid_ref) ++ } ++ } ++} ++ ++ ++#[cfg(test)] ++#[cfg(feature = "with-uuid")] ++mod test { ++ use super::CFUUID; ++ use uuid::Uuid; ++ ++ #[test] ++ fn uuid_conversion() { ++ let cf_uuid = CFUUID::new(); ++ let uuid: Uuid = cf_uuid.clone().into(); ++ let converted = CFUUID::from(uuid); ++ assert!(cf_uuid == converted); ++ } ++} diff --cc vendor/core-foundation-0.6.1/tests/use_macro_outside_crate.rs index 000000000,000000000..ff1c17d4c new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-0.6.1/tests/use_macro_outside_crate.rs @@@ -1,0 -1,0 +1,28 @@@ ++#[macro_use] ++extern crate 
core_foundation; ++ ++use core_foundation::base::{CFComparisonResult, TCFType}; ++use std::os::raw::c_void; ++ ++// sys equivalent stuff that must be declared ++ ++#[repr(C)] ++pub struct __CFFooBar(c_void); ++ ++pub type CFFooBarRef = *const __CFFooBar; ++ ++extern "C" { ++ pub fn CFFooBarGetTypeID() -> core_foundation::base::CFTypeID; ++ pub fn fake_compare( ++ this: CFFooBarRef, ++ other: CFFooBarRef, ++ context: *mut c_void, ++ ) -> CFComparisonResult; ++} ++ ++// Try to use the macros outside of the crate ++ ++declare_TCFType!(CFFooBar, CFFooBarRef); ++impl_TCFType!(CFFooBar, CFFooBarRef, CFFooBarGetTypeID); ++impl_CFTypeDescription!(CFFooBar); ++impl_CFComparison!(CFFooBar, fake_compare); diff --cc vendor/core-foundation-sys-0.6.1/.cargo-checksum.json index 000000000,000000000..ec4a76eaf new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"a3fb15cdbdd9cf8b82d97d0296bb5cd3631bba58d6e31650a002a8e7fb5721f9"} diff --cc vendor/core-foundation-sys-0.6.1/Cargo.toml index 000000000,000000000..12e7e59b2 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/Cargo.toml @@@ -1,0 -1,0 +1,27 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "core-foundation-sys" ++version = "0.6.1" ++authors = ["The Servo Project Developers"] ++build = "build.rs" ++description = "Bindings to Core Foundation for OS X" ++homepage = "https://github.com/servo/core-foundation-rs" ++license = "MIT / Apache-2.0" ++repository = "https://github.com/servo/core-foundation-rs" ++ ++[dependencies] ++ ++[features] ++mac_os_10_7_support = [] ++mac_os_10_8_features = [] diff --cc vendor/core-foundation-sys-0.6.1/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. 
++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. ++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. 
If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. 
Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. 
diff --cc vendor/core-foundation-sys-0.6.1/LICENSE-MIT index 000000000,000000000..807526f57 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/LICENSE-MIT @@@ -1,0 -1,0 +1,25 @@@ ++Copyright (c) 2012-2013 Mozilla Foundation ++ ++Permission is hereby granted, free of charge, to any ++person obtaining a copy of this software and associated ++documentation files (the "Software"), to deal in the ++Software without restriction, including without ++limitation the rights to use, copy, modify, merge, ++publish, distribute, sublicense, and/or sell copies of ++the Software, and to permit persons to whom the Software ++is furnished to do so, subject to the following ++conditions: ++ ++The above copyright notice and this permission notice ++shall be included in all copies or substantial portions ++of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED ++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A ++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT ++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR ++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++DEALINGS IN THE SOFTWARE. diff --cc vendor/core-foundation-sys-0.6.1/build.rs index 000000000,000000000..1f03b0602 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/build.rs @@@ -1,0 -1,0 +1,14 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++fn main() { ++ if std::env::var("TARGET").unwrap().contains("-apple") { ++ println!("cargo:rustc-link-lib=framework=CoreFoundation"); ++ } ++} diff --cc vendor/core-foundation-sys-0.6.1/src/array.rs index 000000000,000000000..5090302fc new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/array.rs @@@ -1,0 -1,0 +1,55 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
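++
++// [Editorial illustration -- not part of the upstream crate. A minimal sketch
++// of driving the raw bindings declared in this file: build a one-element
++// CFArray with the standard CFType callbacks and read it back. Assumes a test
++// host where the CoreFoundation framework is available.]
++#[cfg(test)]
++mod editorial_array_example {
++    use super::*;
++    use base::{kCFAllocatorDefault, kCFNull, CFRelease, CFTypeRef};
++    use std::os::raw::c_void;
++
++    #[test]
++    fn create_and_read_back() {
++        unsafe {
++            let values = [kCFNull as *const c_void];
++            let array = CFArrayCreate(kCFAllocatorDefault,
++                                      values.as_ptr(),
++                                      1,
++                                      &kCFTypeArrayCallBacks);
++            assert_eq!(CFArrayGetCount(array), 1);
++            assert_eq!(CFArrayGetValueAtIndex(array, 0), values[0]);
++            // The create rule applies: we own the array and must release it.
++            CFRelease(array as CFTypeRef);
++        }
++    }
++}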
++ ++use std::os::raw::c_void; ++ ++use base::{CFRange, CFIndex, CFAllocatorRef, CFTypeID, Boolean}; ++use string::CFStringRef; ++ ++pub type CFArrayRetainCallBack = extern "C" fn(allocator: CFAllocatorRef, value: *const c_void) -> *const c_void; ++pub type CFArrayReleaseCallBack = extern "C" fn(allocator: CFAllocatorRef, value: *const c_void); ++pub type CFArrayCopyDescriptionCallBack = extern "C" fn(value: *const c_void) -> CFStringRef; ++pub type CFArrayEqualCallBack = extern "C" fn(value1: *const c_void, value2: *const c_void) -> Boolean; ++ ++#[repr(C)] ++#[derive(Clone, Copy)] ++pub struct CFArrayCallBacks { ++ pub version: CFIndex, ++ pub retain: CFArrayRetainCallBack, ++ pub release: CFArrayReleaseCallBack, ++ pub copyDescription: CFArrayCopyDescriptionCallBack, ++ pub equal: CFArrayEqualCallBack, ++} ++ ++#[repr(C)] ++pub struct __CFArray(c_void); ++ ++pub type CFArrayRef = *const __CFArray; ++ ++extern { ++ /* ++ * CFArray.h ++ */ ++ pub static kCFTypeArrayCallBacks: CFArrayCallBacks; ++ ++ pub fn CFArrayCreate(allocator: CFAllocatorRef, values: *const *const c_void, ++ numValues: CFIndex, callBacks: *const CFArrayCallBacks) -> CFArrayRef; ++ pub fn CFArrayCreateCopy(allocator: CFAllocatorRef , theArray: CFArrayRef) -> CFArrayRef; ++ ++ // CFArrayBSearchValues ++ // CFArrayContainsValue ++ pub fn CFArrayGetCount(theArray: CFArrayRef) -> CFIndex; ++ // CFArrayGetCountOfValue ++ // CFArrayGetFirstIndexOfValue ++ // CFArrayGetLastIndexOfValue ++ pub fn CFArrayGetValues(theArray: CFArrayRef, range: CFRange, values: *mut *const c_void); ++ pub fn CFArrayGetValueAtIndex(theArray: CFArrayRef, idx: CFIndex) -> *const c_void; ++ // CFArrayApplyFunction ++ pub fn CFArrayGetTypeID() -> CFTypeID; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/attributed_string.rs index 000000000,000000000..ecdffe629 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/attributed_string.rs @@@ -1,0 -1,0 +1,56 @@@ ++// Copyright 2013 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
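++
++// [Editorial illustration -- not part of the upstream crate. A small sketch of
++// the declarations below: build a CFString via the sys-level string bindings
++// (assumed to live in this crate's `string` module), wrap it in a
++// CFAttributedString with a NULL attribute dictionary, and measure it.]
++#[cfg(test)]
++mod editorial_attributed_string_example {
++    use super::{CFAttributedStringCreate, CFAttributedStringGetLength};
++    use base::kCFAllocatorDefault;
++    use string::{kCFStringEncodingUTF8, CFStringCreateWithBytes};
++    use std::ptr;
++
++    #[test]
++    fn create_and_measure() {
++        unsafe {
++            let text = "hello";
++            let cf_text = CFStringCreateWithBytes(kCFAllocatorDefault,
++                                                  text.as_ptr(),
++                                                  text.len() as _,
++                                                  kCFStringEncodingUTF8,
++                                                  0);
++            // Core Foundation accepts a NULL attributes dictionary.
++            let astr = CFAttributedStringCreate(kCFAllocatorDefault, cf_text, ptr::null());
++            assert_eq!(CFAttributedStringGetLength(astr), 5);
++            // (CFRelease calls omitted for brevity.)
++        }
++    }
++}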
++ ++use std::os::raw::c_void; ++use base::{CFAllocatorRef, CFTypeRef, CFIndex, CFRange, CFTypeID}; ++use string::CFStringRef; ++use dictionary::CFDictionaryRef; ++ ++#[repr(C)] ++pub struct __CFAttributedString(c_void); ++ ++pub type CFAttributedStringRef = *const __CFAttributedString; ++pub type CFMutableAttributedStringRef = *const __CFAttributedString; ++ ++extern { ++ /* CFAttributedString */ ++ ++ pub fn CFAttributedStringCreate( ++ allocator: CFAllocatorRef, ++ str: CFStringRef, ++ attributes: CFDictionaryRef, ++ ) -> CFAttributedStringRef; ++ ++ pub fn CFAttributedStringGetLength(astr: CFAttributedStringRef) -> CFIndex; ++ ++ pub fn CFAttributedStringGetTypeID() -> CFTypeID; ++ ++ /* CFMutableAttributedString */ ++ ++ pub fn CFAttributedStringCreateMutableCopy( ++ allocator: CFAllocatorRef, max_length: CFIndex, astr: CFAttributedStringRef ++ ) -> CFMutableAttributedStringRef; ++ ++ pub fn CFAttributedStringCreateMutable( ++ allocator: CFAllocatorRef, ++ max_length: CFIndex, ++ ) -> CFMutableAttributedStringRef; ++ ++ pub fn CFAttributedStringReplaceString( ++ astr: CFMutableAttributedStringRef, range: CFRange, replacement: CFStringRef); ++ ++ pub fn CFAttributedStringSetAttribute( ++ astr: CFMutableAttributedStringRef, ++ range: CFRange, ++ attr_name: CFStringRef, ++ value: CFTypeRef, ++ ); ++ ++ pub fn CFMutableAttributedStringGetTypeID() -> CFTypeID; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/base.rs index 000000000,000000000..8cbae3805 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/base.rs @@@ -1,0 -1,0 +1,154 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
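++
++// [Editorial illustration -- not part of the upstream crate. A minimal check
++// of the conversion implemented below, mapping the C-style comparison enum
++// onto Rust's standard `Ordering`.]
++#[cfg(test)]
++mod editorial_comparison_example {
++    use super::CFComparisonResult;
++    use std::cmp::Ordering;
++
++    #[test]
++    fn comparison_result_to_ordering() {
++        let less: Ordering = CFComparisonResult::LessThan.into();
++        let equal: Ordering = CFComparisonResult::EqualTo.into();
++        assert_eq!(less, Ordering::Less);
++        assert_eq!(equal, Ordering::Equal);
++    }
++}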
++
++use std::cmp::Ordering;
++use std::os::raw::{c_uint, c_long, c_ulong, c_void, c_int};
++use string::CFStringRef;
++
++pub type Boolean = u8;
++pub type CFIndex = c_long;
++pub type mach_port_t = c_uint;
++pub type CFAllocatorRef = *const c_void;
++pub type CFNullRef = *const c_void;
++pub type CFHashCode = c_ulong;
++pub type CFTypeID = c_ulong;
++pub type CFTypeRef = *const c_void;
++pub type CFOptionFlags = u32;
++pub type OSStatus = i32;
++pub type SInt32 = c_int;
++
++#[repr(i64)]
++#[derive(Clone, Copy)]
++pub enum CFComparisonResult {
++    LessThan = -1,
++    EqualTo = 0,
++    GreaterThan = 1,
++}
++
++impl Into<Ordering> for CFComparisonResult {
++    fn into(self) -> Ordering {
++        match self {
++            CFComparisonResult::LessThan => Ordering::Less,
++            CFComparisonResult::EqualTo => Ordering::Equal,
++            CFComparisonResult::GreaterThan => Ordering::Greater
++        }
++    }
++}
++
++#[repr(C)]
++#[derive(Clone, Copy)]
++pub struct CFRange {
++    pub location: CFIndex,
++    pub length: CFIndex
++}
++
++// for back-compat
++impl CFRange {
++    pub fn init(location: CFIndex, length: CFIndex) -> CFRange {
++        CFRange {
++            location: location,
++            length: length,
++        }
++    }
++}
++
++pub type CFAllocatorRetainCallBack = extern "C" fn(info: *mut c_void) -> *mut c_void;
++pub type CFAllocatorReleaseCallBack = extern "C" fn(info: *mut c_void);
++pub type CFAllocatorCopyDescriptionCallBack = extern "C" fn(info: *mut c_void) -> CFStringRef;
++pub type CFAllocatorAllocateCallBack = extern "C" fn(allocSize: CFIndex, hint: CFOptionFlags, info: *mut c_void) -> *mut c_void;
++pub type CFAllocatorReallocateCallBack = extern "C" fn(ptr: *mut c_void, newsize: CFIndex, hint: CFOptionFlags, info: *mut c_void) -> *mut c_void;
++pub type CFAllocatorDeallocateCallBack = extern "C" fn(ptr: *mut c_void, info: *mut c_void);
++pub type CFAllocatorPreferredSizeCallBack = extern "C" fn(size: CFIndex, hint: CFOptionFlags, info: *mut c_void) -> CFIndex;
++
++#[repr(C)]
++#[derive(Clone, Copy)]
++pub struct CFAllocatorContext {
++    pub version: CFIndex,
++    pub info: *mut c_void,
++    pub retain: CFAllocatorRetainCallBack,
++    pub release: CFAllocatorReleaseCallBack,
++    pub copyDescription: CFAllocatorCopyDescriptionCallBack,
++    pub allocate: CFAllocatorAllocateCallBack,
++    pub reallocate: CFAllocatorReallocateCallBack,
++    pub deallocate: CFAllocatorDeallocateCallBack,
++    pub preferredSize: CFAllocatorPreferredSizeCallBack
++}
++
++/// Trait for all types which are Core Foundation reference types.
++pub trait TCFTypeRef {
++    fn as_void_ptr(&self) -> *const c_void;
++
++    unsafe fn from_void_ptr(ptr: *const c_void) -> Self;
++}
++
++impl<T> TCFTypeRef for *const T {
++    fn as_void_ptr(&self) -> *const c_void {
++        (*self) as *const c_void
++    }
++
++    unsafe fn from_void_ptr(ptr: *const c_void) -> Self {
++        ptr as *const T
++    }
++}
++
++impl<T> TCFTypeRef for *mut T {
++    fn as_void_ptr(&self) -> *const c_void {
++        (*self) as *const T as *const c_void
++    }
++
++    unsafe fn from_void_ptr(ptr: *const c_void) -> Self {
++        ptr as *const T as *mut T
++    }
++}
++
++extern {
++    /*
++     * CFBase.h
++     */
++
++    /* CFAllocator Reference */
++
++    pub static kCFAllocatorDefault: CFAllocatorRef;
++    pub static kCFAllocatorSystemDefault: CFAllocatorRef;
++    pub static kCFAllocatorMalloc: CFAllocatorRef;
++    pub static kCFAllocatorMallocZone: CFAllocatorRef;
++    pub static kCFAllocatorNull: CFAllocatorRef;
++    pub static kCFAllocatorUseContext: CFAllocatorRef;
++
++    pub fn CFAllocatorCreate(allocator: CFAllocatorRef, context: *mut CFAllocatorContext) -> CFAllocatorRef;
++    pub fn CFAllocatorAllocate(allocator: CFAllocatorRef, size: CFIndex, hint: CFOptionFlags) -> *mut c_void;
++    pub fn CFAllocatorDeallocate(allocator: CFAllocatorRef, ptr: *mut c_void);
++    pub fn CFAllocatorGetPreferredSizeForSize(allocator: CFAllocatorRef, size: CFIndex, hint: CFOptionFlags) -> CFIndex;
++    pub fn CFAllocatorReallocate(allocator: CFAllocatorRef, ptr: *mut c_void, newsize: CFIndex, hint: CFOptionFlags) -> *mut c_void;
++    pub fn CFAllocatorGetDefault() -> CFAllocatorRef;
++    pub fn CFAllocatorSetDefault(allocator: CFAllocatorRef);
++    pub fn CFAllocatorGetContext(allocator: CFAllocatorRef, context: *mut CFAllocatorContext);
++    pub fn CFAllocatorGetTypeID() -> CFTypeID;
++
++    /* CFNull Reference */
++
++    pub static kCFNull: CFNullRef;
++
++    /* CFType Reference */
++
++    //fn CFCopyTypeIDDescription
++    //fn CFGetAllocator
++    pub fn CFCopyDescription(cf: CFTypeRef) -> CFStringRef;
++    pub fn CFEqual(cf1: CFTypeRef, cf2: CFTypeRef) -> Boolean;
++    pub fn CFGetRetainCount(cf: CFTypeRef) -> CFIndex;
++    pub fn CFGetTypeID(cf: CFTypeRef) -> CFTypeID;
++    pub fn CFHash(cf: CFTypeRef) -> CFHashCode;
++    //fn CFMakeCollectable
++    pub fn CFRelease(cf: CFTypeRef);
++    pub fn CFRetain(cf: CFTypeRef) -> CFTypeRef;
++    pub fn CFShow(obj: CFTypeRef);
++
++    /* Base Utilities Reference */
++    // N.B. Some things missing here.
++}
diff --cc vendor/core-foundation-sys-0.6.1/src/bundle.rs
index 000000000,000000000..687b00ee6
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-sys-0.6.1/src/bundle.rs
@@@ -1,0 -1,0 +1,36 @@@
++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++ ++use std::os::raw::c_void; ++ ++use base::{CFTypeID, CFAllocatorRef}; ++use url::CFURLRef; ++use dictionary::CFDictionaryRef; ++use string::CFStringRef; ++ ++#[repr(C)] ++pub struct __CFBundle(c_void); ++ ++pub type CFBundleRef = *mut __CFBundle; ++ ++extern { ++ /* ++ * CFBundle.h ++ */ ++ pub fn CFBundleCreate(allocator: CFAllocatorRef, bundleURL: CFURLRef) -> CFBundleRef; ++ ++ pub fn CFBundleGetBundleWithIdentifier(bundleID: CFStringRef) -> CFBundleRef; ++ pub fn CFBundleGetFunctionPointerForName(bundle: CFBundleRef, function_name: CFStringRef) -> *const c_void; ++ pub fn CFBundleGetMainBundle() -> CFBundleRef; ++ pub fn CFBundleGetInfoDictionary(bundle: CFBundleRef) -> CFDictionaryRef; ++ ++ pub fn CFBundleGetTypeID() -> CFTypeID; ++ pub fn CFBundleCopyExecutableURL(bundle: CFBundleRef) -> CFURLRef; ++ pub fn CFBundleCopyPrivateFrameworksURL(bundle: CFBundleRef) -> CFURLRef; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/data.rs index 000000000,000000000..51b3a4cd8 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/data.rs @@@ -1,0 -1,0 +1,31 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++use std::os::raw::c_void; ++ ++use base::{CFAllocatorRef, CFTypeID, CFIndex}; ++ ++#[repr(C)] ++pub struct __CFData(c_void); ++ ++pub type CFDataRef = *const __CFData; ++ ++extern { ++ /* ++ * CFData.h ++ */ ++ ++ pub fn CFDataCreate(allocator: CFAllocatorRef, ++ bytes: *const u8, length: CFIndex) -> CFDataRef; ++ //fn CFDataFind ++ pub fn CFDataGetBytePtr(theData: CFDataRef) -> *const u8; ++ pub fn CFDataGetLength(theData: CFDataRef) -> CFIndex; ++ ++ pub fn CFDataGetTypeID() -> CFTypeID; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/date.rs index 000000000,000000000..f83ce1dd1 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/date.rs @@@ -1,0 -1,0 +1,34 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
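[Editor's aside; not vendored code. Round-tripping a byte buffer through the CFData bindings above; a minimal sketch assuming a macOS host.]

extern crate core_foundation_sys;

use std::slice;
use core_foundation_sys::base::{kCFAllocatorDefault, CFRelease};
use core_foundation_sys::data::{CFDataCreate, CFDataGetBytePtr, CFDataGetLength};

fn main() {
    let bytes = [1u8, 2, 3, 4];
    unsafe {
        // CFDataCreate copies the buffer, so `bytes` need not outlive `data`.
        let data = CFDataCreate(kCFAllocatorDefault, bytes.as_ptr(), bytes.len() as _);
        let view = slice::from_raw_parts(CFDataGetBytePtr(data),
                                         CFDataGetLength(data) as usize);
        assert_eq!(view, &bytes[..]);
        CFRelease(data as _);
    }
}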
++ ++use std::os::raw::c_void; ++ ++use base::{CFAllocatorRef, CFComparisonResult, CFTypeID}; ++ ++#[repr(C)] ++pub struct __CFDate(c_void); ++ ++pub type CFDateRef = *const __CFDate; ++ ++pub type CFTimeInterval = f64; ++pub type CFAbsoluteTime = CFTimeInterval; ++ ++extern { ++ pub static kCFAbsoluteTimeIntervalSince1904: CFTimeInterval; ++ pub static kCFAbsoluteTimeIntervalSince1970: CFTimeInterval; ++ ++ pub fn CFAbsoluteTimeGetCurrent() -> CFAbsoluteTime; ++ ++ pub fn CFDateCreate(allocator: CFAllocatorRef, at: CFAbsoluteTime) -> CFDateRef; ++ pub fn CFDateGetAbsoluteTime(date: CFDateRef) -> CFAbsoluteTime; ++ pub fn CFDateGetTimeIntervalSinceDate(date: CFDateRef, other: CFDateRef) -> CFTimeInterval; ++ pub fn CFDateCompare(date: CFDateRef, other: CFDateRef, context: *mut c_void) -> CFComparisonResult; ++ ++ pub fn CFDateGetTypeID() -> CFTypeID; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/dictionary.rs index 000000000,000000000..d10e9c120 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/dictionary.rs @@@ -1,0 -1,0 +1,91 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++use std::os::raw::c_void; ++ ++use base::{CFAllocatorRef, CFHashCode, CFIndex, CFTypeID, Boolean}; ++use string::CFStringRef; ++ ++pub type CFDictionaryApplierFunction = extern "C" fn(key: *const c_void, value: *const c_void, context: *mut c_void); ++ ++pub type CFDictionaryRetainCallBack = extern "C" fn(allocator: CFAllocatorRef, value: *const c_void) -> *const c_void; ++pub type CFDictionaryReleaseCallBack = extern "C" fn(allocator: CFAllocatorRef, value: *const c_void); ++pub type CFDictionaryCopyDescriptionCallBack = extern "C" fn(value: *const c_void) -> CFStringRef; ++pub type CFDictionaryEqualCallBack = extern "C" fn(value1: *const c_void, value2: *const c_void) -> Boolean; ++pub type CFDictionaryHashCallBack = extern "C" fn(value: *const c_void) -> CFHashCode; ++ ++#[repr(C)] ++#[derive(Clone, Copy)] ++pub struct CFDictionaryKeyCallBacks { ++ pub version: CFIndex, ++ pub retain: CFDictionaryRetainCallBack, ++ pub release: CFDictionaryReleaseCallBack, ++ pub copyDescription: CFDictionaryCopyDescriptionCallBack, ++ pub equal: CFDictionaryEqualCallBack, ++ pub hash: CFDictionaryHashCallBack ++} ++ ++#[repr(C)] ++#[derive(Clone, Copy)] ++pub struct CFDictionaryValueCallBacks { ++ pub version: CFIndex, ++ pub retain: CFDictionaryRetainCallBack, ++ pub release: CFDictionaryReleaseCallBack, ++ pub copyDescription: CFDictionaryCopyDescriptionCallBack, ++ pub equal: CFDictionaryEqualCallBack ++} ++ ++#[repr(C)] ++pub struct __CFDictionary(c_void); ++ ++pub type CFDictionaryRef = *const __CFDictionary; ++pub type CFMutableDictionaryRef = *mut __CFDictionary; ++ ++extern { ++ /* ++ * CFDictionary.h ++ */ ++ ++ pub static kCFTypeDictionaryKeyCallBacks: CFDictionaryKeyCallBacks; ++ pub static kCFTypeDictionaryValueCallBacks: CFDictionaryValueCallBacks; ++ ++ pub fn CFDictionaryContainsKey(theDict: CFDictionaryRef, key: *const c_void) -> Boolean; ++ pub fn CFDictionaryCreate(allocator: CFAllocatorRef, keys: *const *const c_void, values: *const *const c_void, ++ numValues: CFIndex, keyCallBacks: *const CFDictionaryKeyCallBacks, ++ valueCallBacks: *const CFDictionaryValueCallBacks) ++ -> CFDictionaryRef; ++ pub 
fn CFDictionaryGetCount(theDict: CFDictionaryRef) -> CFIndex; ++ pub fn CFDictionaryGetTypeID() -> CFTypeID; ++ pub fn CFDictionaryGetValueIfPresent(theDict: CFDictionaryRef, key: *const c_void, value: *mut *const c_void) ++ -> Boolean; ++ pub fn CFDictionaryApplyFunction(theDict: CFDictionaryRef, ++ applier: CFDictionaryApplierFunction, ++ context: *mut c_void); ++ pub fn CFDictionaryGetKeysAndValues(theDict: CFDictionaryRef, ++ keys: *mut *const c_void, ++ values: *mut *const c_void); ++ ++ pub fn CFDictionaryCreateMutable(allocator: CFAllocatorRef, capacity: CFIndex, ++ keyCallbacks: *const CFDictionaryKeyCallBacks, ++ valueCallbacks: *const CFDictionaryValueCallBacks) -> CFMutableDictionaryRef; ++ pub fn CFDictionaryCreateMutableCopy(allocator: CFAllocatorRef, capacity: CFIndex, ++ theDict: CFDictionaryRef) -> CFMutableDictionaryRef; ++ pub fn CFDictionaryAddValue(theDict: CFMutableDictionaryRef, ++ key: *const c_void, ++ value: *const c_void); ++ pub fn CFDictionarySetValue(theDict: CFMutableDictionaryRef, ++ key: *const c_void, ++ value: *const c_void); ++ pub fn CFDictionaryReplaceValue(theDict: CFMutableDictionaryRef, ++ key: *const c_void, ++ value: *const c_void); ++ pub fn CFDictionaryRemoveValue(theDict: CFMutableDictionaryRef, ++ key: *const c_void); ++ pub fn CFDictionaryRemoveAllValues(theDict: CFMutableDictionaryRef); ++} diff --cc vendor/core-foundation-sys-0.6.1/src/error.rs index 000000000,000000000..8a4c1d494 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/error.rs @@@ -1,0 -1,0 +1,32 @@@ ++// Copyright 2016 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++use std::os::raw::c_void; ++ ++use base::{CFTypeID, CFIndex}; ++use string::CFStringRef; ++ ++#[repr(C)] ++pub struct __CFError(c_void); ++ ++pub type CFErrorRef = *mut __CFError; ++ ++extern "C" { ++ pub fn CFErrorGetTypeID() -> CFTypeID; ++ ++ pub static kCFErrorDomainPOSIX: CFStringRef; ++ pub static kCFErrorDomainOSStatus: CFStringRef; ++ pub static kCFErrorDomainMach: CFStringRef; ++ pub static kCFErrorDomainCocoa: CFStringRef; ++ ++ pub fn CFErrorGetDomain(err: CFErrorRef) -> CFStringRef; ++ pub fn CFErrorGetCode(err: CFErrorRef) -> CFIndex; ++ ++ pub fn CFErrorCopyDescription(err: CFErrorRef) -> CFStringRef; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/filedescriptor.rs index 000000000,000000000..3f51d1072 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/filedescriptor.rs @@@ -1,0 -1,0 +1,58 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
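[Editor's aside; not vendored code. A one-entry immutable CFDictionary built with the bindings above, using the standard kCFType callbacks so keys and values are retained and released as CF objects. Sketch only; assumes macOS, and the key/value strings are invented.]

extern crate core_foundation_sys;

use std::os::raw::c_void;
use std::ptr;
use core_foundation_sys::base::{kCFAllocatorDefault, CFRelease};
use core_foundation_sys::dictionary::{kCFTypeDictionaryKeyCallBacks,
                                      kCFTypeDictionaryValueCallBacks,
                                      CFDictionaryCreate, CFDictionaryGetCount,
                                      CFDictionaryGetValueIfPresent};
use core_foundation_sys::string::{kCFStringEncodingUTF8, CFStringCreateWithCString};

fn main() {
    unsafe {
        let key = CFStringCreateWithCString(kCFAllocatorDefault,
                                            b"name\0".as_ptr() as *const _,
                                            kCFStringEncodingUTF8);
        let value = CFStringCreateWithCString(kCFAllocatorDefault,
                                              b"cargo\0".as_ptr() as *const _,
                                              kCFStringEncodingUTF8);
        let keys = [key as *const c_void];
        let values = [value as *const c_void];
        let dict = CFDictionaryCreate(kCFAllocatorDefault,
                                      keys.as_ptr(), values.as_ptr(), 1,
                                      &kCFTypeDictionaryKeyCallBacks,
                                      &kCFTypeDictionaryValueCallBacks);
        assert_eq!(CFDictionaryGetCount(dict), 1);
        let mut found = ptr::null();
        // Boolean is a u8 in these bindings; nonzero means "present".
        assert_ne!(CFDictionaryGetValueIfPresent(dict, key as *const c_void, &mut found), 0);
        assert_eq!(found, value as *const c_void);
        CFRelease(dict as _);
        CFRelease(key as _);
        CFRelease(value as _);
    }
}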
++
++use std::os::raw::{c_int, c_void};
++
++use base::{Boolean, CFIndex, CFTypeID, CFOptionFlags, CFAllocatorRef};
++use string::CFStringRef;
++use runloop::CFRunLoopSourceRef;
++
++pub type CFFileDescriptorNativeDescriptor = c_int;
++
++#[repr(C)]
++pub struct __CFFileDescriptor(c_void);
++
++pub type CFFileDescriptorRef = *mut __CFFileDescriptor;
++
++/* Callback Reason Types */
++pub const kCFFileDescriptorReadCallBack: CFOptionFlags = 1 << 0;
++pub const kCFFileDescriptorWriteCallBack: CFOptionFlags = 1 << 1;
++
++pub type CFFileDescriptorCallBack = extern "C" fn (f: CFFileDescriptorRef, callBackTypes: CFOptionFlags, info: *mut c_void);
++
++#[repr(C)]
++#[derive(Clone, Copy)]
++pub struct CFFileDescriptorContext {
++    pub version: CFIndex,
++    pub info: *mut c_void,
++    pub retain: Option<extern "C" fn (info: *const c_void) -> *const c_void>,
++    pub release: Option<extern "C" fn (info: *const c_void)>,
++    pub copyDescription: Option<extern "C" fn (info: *const c_void) -> CFStringRef>,
++}
++
++extern {
++    /*
++     * CFFileDescriptor.h
++     */
++    pub fn CFFileDescriptorGetTypeID() -> CFTypeID;
++
++    pub fn CFFileDescriptorCreate(allocator: CFAllocatorRef, fd: CFFileDescriptorNativeDescriptor, closeOnInvalidate: Boolean, callout: CFFileDescriptorCallBack, context: *const CFFileDescriptorContext) -> CFFileDescriptorRef;
++
++    pub fn CFFileDescriptorGetNativeDescriptor(f: CFFileDescriptorRef) -> CFFileDescriptorNativeDescriptor;
++
++    pub fn CFFileDescriptorGetContext(f: CFFileDescriptorRef, context: *mut CFFileDescriptorContext);
++
++    pub fn CFFileDescriptorEnableCallBacks(f: CFFileDescriptorRef, callBackTypes: CFOptionFlags);
++    pub fn CFFileDescriptorDisableCallBacks(f: CFFileDescriptorRef, callBackTypes: CFOptionFlags);
++
++    pub fn CFFileDescriptorInvalidate(f: CFFileDescriptorRef);
++    pub fn CFFileDescriptorIsValid(f: CFFileDescriptorRef) -> Boolean;
++
++    pub fn CFFileDescriptorCreateRunLoopSource(allocator: CFAllocatorRef, f: CFFileDescriptorRef, order: CFIndex) -> CFRunLoopSourceRef;
++}
diff --cc vendor/core-foundation-sys-0.6.1/src/lib.rs
index 000000000,000000000..e03cddbe5
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-sys-0.6.1/src/lib.rs
@@@ -1,0 -1,0 +1,30 @@@
++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, improper_ctypes)]
++
++#![cfg_attr(all(feature="mac_os_10_7_support", feature="mac_os_10_8_features"), feature(linkage))] // back-compat requires weak linkage
++
++pub mod array;
++pub mod attributed_string;
++pub mod base;
++pub mod bundle;
++pub mod data;
++pub mod date;
++pub mod dictionary;
++pub mod error;
++pub mod filedescriptor;
++pub mod messageport;
++pub mod number;
++pub mod propertylist;
++pub mod runloop;
++pub mod set;
++pub mod string;
++pub mod timezone;
++pub mod url;
++pub mod uuid;
diff --cc vendor/core-foundation-sys-0.6.1/src/messageport.rs
index 000000000,000000000..e33d9aa4b
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-sys-0.6.1/src/messageport.rs
@@@ -1,0 -1,0 +1,79 @@@
++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++use std::os::raw::c_void;
++
++use base::{CFAllocatorRef, CFIndex, CFTypeID, Boolean};
++use data::CFDataRef;
++use date::CFTimeInterval;
++use runloop::CFRunLoopSourceRef;
++use string::CFStringRef;
++
++#[repr(C)]
++#[derive(Copy, Clone)]
++#[derive(Debug)]
++pub struct CFMessagePortContext {
++    pub version: CFIndex,
++    pub info: *mut c_void,
++    pub retain: Option<extern "C" fn (info: *const c_void) -> *const c_void>,
++    pub release: Option<extern "C" fn (info: *const c_void)>,
++    pub copyDescription: Option<extern "C" fn (info: *const c_void) -> CFStringRef>,
++}
++
++pub type CFMessagePortCallBack = Option<
++    unsafe extern fn(local: CFMessagePortRef,
++                     msgid: i32,
++                     data: CFDataRef,
++                     info: *mut c_void) -> CFDataRef>;
++
++pub type CFMessagePortInvalidationCallBack = Option<
++    unsafe extern "C" fn(ms: CFMessagePortRef, info: *mut c_void)>;
++
++#[repr(C)]
++pub struct __CFMessagePort(c_void);
++pub type CFMessagePortRef = *mut __CFMessagePort;
++
++extern {
++    /*
++     * CFMessagePort.h
++     */
++    pub fn CFMessagePortGetTypeID() -> CFTypeID;
++    pub fn CFMessagePortCreateLocal(allocator: CFAllocatorRef,
++                                    name: CFStringRef,
++                                    callout: CFMessagePortCallBack,
++                                    context: *const CFMessagePortContext,
++                                    shouldFreeInfo: *mut Boolean)
++                                    -> CFMessagePortRef;
++    pub fn CFMessagePortCreateRemote(allocator: CFAllocatorRef,
++                                     name: CFStringRef) -> CFMessagePortRef;
++    pub fn CFMessagePortIsRemote(ms: CFMessagePortRef) -> Boolean;
++    pub fn CFMessagePortGetName(ms: CFMessagePortRef) -> CFStringRef;
++    pub fn CFMessagePortSetName(ms: CFMessagePortRef, newName: CFStringRef)
++                                -> Boolean;
++    pub fn CFMessagePortGetContext(ms: CFMessagePortRef,
++                                   context: *mut CFMessagePortContext);
++    pub fn CFMessagePortInvalidate(ms: CFMessagePortRef);
++    pub fn CFMessagePortIsValid(ms: CFMessagePortRef) -> Boolean;
++    pub fn CFMessagePortGetInvalidationCallBack(ms: CFMessagePortRef)
++                                                -> CFMessagePortInvalidationCallBack;
++    pub fn CFMessagePortSetInvalidationCallBack(ms: CFMessagePortRef,
++                                                callout: CFMessagePortInvalidationCallBack);
++    pub fn CFMessagePortSendRequest(remote: CFMessagePortRef, msgid: i32,
++                                    data: CFDataRef,
++                                    sendTimeout: CFTimeInterval,
++                                    rcvTimeout: CFTimeInterval,
++                                    replyMode: CFStringRef,
++                                    returnData: *mut CFDataRef) -> i32;
++    pub fn CFMessagePortCreateRunLoopSource(allocator: CFAllocatorRef,
++                                            local: CFMessagePortRef,
++                                            order: CFIndex)
++                                            -> CFRunLoopSourceRef;
++    // CFMessagePortSetDispatchQueue
++}
diff --cc vendor/core-foundation-sys-0.6.1/src/number.rs
index 000000000,000000000..931b95da0
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-sys-0.6.1/src/number.rs
@@@ -1,0 -1,0 +1,60 @@@
++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++ ++use std::os::raw::c_void; ++ ++use base::{CFAllocatorRef, CFTypeID, CFComparisonResult}; ++ ++#[repr(C)] ++pub struct __CFBoolean(c_void); ++ ++pub type CFBooleanRef = *const __CFBoolean; ++ ++pub type CFNumberType = u32; ++ ++// members of enum CFNumberType ++// static kCFNumberSInt8Type: CFNumberType = 1; ++// static kCFNumberSInt16Type: CFNumberType = 2; ++pub static kCFNumberSInt32Type: CFNumberType = 3; ++pub static kCFNumberSInt64Type: CFNumberType = 4; ++pub static kCFNumberFloat32Type: CFNumberType = 5; ++pub static kCFNumberFloat64Type: CFNumberType = 6; ++// static kCFNumberCharType: CFNumberType = 7; ++// static kCFNumberShortType: CFNumberType = 8; ++// static kCFNumberIntType: CFNumberType = 9; ++// static kCFNumberLongType: CFNumberType = 10; ++// static kCFNumberLongLongType: CFNumberType = 11; ++// static kCFNumberFloatType: CFNumberType = 12; ++// static kCFNumberDoubleType: CFNumberType = 13; ++// static kCFNumberCFIndexType: CFNumberType = 14; ++// static kCFNumberNSIntegerType: CFNumberType = 15; ++// static kCFNumberCGFloatType: CFNumberType = 16; ++// static kCFNumberMaxType: CFNumberType = 16; ++ ++// This is an enum due to zero-sized types warnings. ++// For more details see https://github.com/rust-lang/rust/issues/27303 ++pub enum __CFNumber {} ++ ++pub type CFNumberRef = *const __CFNumber; ++ ++extern { ++ /* ++ * CFNumber.h ++ */ ++ pub static kCFBooleanTrue: CFBooleanRef; ++ pub static kCFBooleanFalse: CFBooleanRef; ++ ++ pub fn CFBooleanGetTypeID() -> CFTypeID; ++ pub fn CFNumberCreate(allocator: CFAllocatorRef, theType: CFNumberType, valuePtr: *const c_void) ++ -> CFNumberRef; ++ //fn CFNumberGetByteSize ++ pub fn CFNumberGetValue(number: CFNumberRef, theType: CFNumberType, valuePtr: *mut c_void) -> bool; ++ pub fn CFNumberCompare(date: CFNumberRef, other: CFNumberRef, context: *mut c_void) -> CFComparisonResult; ++ pub fn CFNumberGetTypeID() -> CFTypeID; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/propertylist.rs index 000000000,000000000..574c4d13f new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/propertylist.rs @@@ -1,0 -1,0 +1,46 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
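[Editor's aside; not vendored code. Boxing and unboxing an i64 through the CFNumber declarations above; a sketch assuming a macOS host.]

extern crate core_foundation_sys;

use std::os::raw::c_void;
use core_foundation_sys::base::{kCFAllocatorDefault, CFRelease};
use core_foundation_sys::number::{kCFNumberSInt64Type, CFNumberCreate, CFNumberGetValue};

fn main() {
    unsafe {
        let v: i64 = 42;
        let n = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type,
                               &v as *const i64 as *const c_void);
        let mut out: i64 = 0;
        // CFNumberGetValue reports (as a bool in these bindings) whether the
        // conversion to the requested type was lossless.
        assert!(CFNumberGetValue(n, kCFNumberSInt64Type,
                                 &mut out as *mut i64 as *mut c_void));
        assert_eq!(out, 42);
        CFRelease(n as _);
    }
}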
++ ++use base::{CFAllocatorRef, CFIndex, CFOptionFlags, CFTypeRef}; ++use data::CFDataRef; ++use error::CFErrorRef; ++ ++pub type CFPropertyListRef = CFTypeRef; ++ ++pub type CFPropertyListFormat = CFIndex; ++pub const kCFPropertyListOpenStepFormat: CFPropertyListFormat = 1; ++pub const kCFPropertyListXMLFormat_v1_0: CFPropertyListFormat = 100; ++pub const kCFPropertyListBinaryFormat_v1_0: CFPropertyListFormat = 200; ++ ++pub type CFPropertyListMutabilityOptions = CFOptionFlags; ++pub const kCFPropertyListImmutable: CFPropertyListMutabilityOptions = 0; ++pub const kCFPropertyListMutableContainers: CFPropertyListMutabilityOptions = 1; ++pub const kCFPropertyListMutableContainersAndLeaves: CFPropertyListMutabilityOptions = 2; ++ ++extern "C" { ++ // CFPropertyList.h ++ // ++ ++ // fn CFPropertyListCreateDeepCopy ++ // fn CFPropertyListIsValid ++ pub fn CFPropertyListCreateWithData(allocator: CFAllocatorRef, ++ data: CFDataRef, ++ options: CFPropertyListMutabilityOptions, ++ format: *mut CFPropertyListFormat, ++ error: *mut CFErrorRef) ++ -> CFPropertyListRef; ++ // fn CFPropertyListCreateWithStream ++ // fn CFPropertyListWrite ++ pub fn CFPropertyListCreateData(allocator: CFAllocatorRef, ++ propertyList: CFPropertyListRef, ++ format: CFPropertyListFormat, ++ options: CFOptionFlags, ++ error: *mut CFErrorRef) ++ -> CFDataRef; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/runloop.rs index 000000000,000000000..5de7b8751 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/runloop.rs @@@ -1,0 -1,0 +1,164 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
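[Editor's aside; not vendored code. A round trip through the property-list API above: serialize a CFString to XML plist bytes, then parse them back. Passing null for the error out-parameters is an assumption that failure details are not needed here; sketch assumes macOS.]

extern crate core_foundation_sys;

use std::ptr;
use core_foundation_sys::base::{kCFAllocatorDefault, CFRelease, CFTypeRef};
use core_foundation_sys::propertylist::{kCFPropertyListImmutable,
                                        kCFPropertyListXMLFormat_v1_0,
                                        CFPropertyListCreateData,
                                        CFPropertyListCreateWithData,
                                        CFPropertyListRef};
use core_foundation_sys::string::{kCFStringEncodingUTF8, CFStringCreateWithCString};

fn main() {
    unsafe {
        let s = CFStringCreateWithCString(kCFAllocatorDefault,
                                          b"hello\0".as_ptr() as *const _,
                                          kCFStringEncodingUTF8);
        // A bare string is itself a valid property-list object.
        let data = CFPropertyListCreateData(kCFAllocatorDefault,
                                            s as CFPropertyListRef,
                                            kCFPropertyListXMLFormat_v1_0,
                                            0, ptr::null_mut());
        let mut format = kCFPropertyListXMLFormat_v1_0;
        let back = CFPropertyListCreateWithData(kCFAllocatorDefault, data,
                                                kCFPropertyListImmutable,
                                                &mut format, ptr::null_mut());
        assert!(!back.is_null());
        CFRelease(back);
        CFRelease(data as CFTypeRef);
        CFRelease(s as CFTypeRef);
    }
}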
++ ++use std::os::raw::c_void; ++ ++use array::CFArrayRef; ++use base::{Boolean, CFIndex, CFTypeID, CFAllocatorRef, CFOptionFlags, CFHashCode, mach_port_t}; ++use date::{CFAbsoluteTime, CFTimeInterval}; ++use string::CFStringRef; ++ ++#[repr(C)] ++pub struct __CFRunLoop(c_void); ++ ++pub type CFRunLoopRef = *mut __CFRunLoop; ++ ++#[repr(C)] ++pub struct __CFRunLoopSource(c_void); ++ ++pub type CFRunLoopSourceRef = *mut __CFRunLoopSource; ++ ++#[repr(C)] ++pub struct __CFRunLoopObserver(c_void); ++ ++pub type CFRunLoopObserverRef = *mut __CFRunLoopObserver; ++ ++// Reasons for CFRunLoopRunInMode() to Return ++pub const kCFRunLoopRunFinished: i32 = 1; ++pub const kCFRunLoopRunStopped: i32 = 2; ++pub const kCFRunLoopRunTimedOut: i32 = 3; ++pub const kCFRunLoopRunHandledSource: i32 = 4; ++ ++// Run Loop Observer Activities ++//typedef CF_OPTIONS(CFOptionFlags, CFRunLoopActivity) { ++pub type CFRunLoopActivity = CFOptionFlags; ++pub const kCFRunLoopEntry: CFOptionFlags = 1 << 0; ++pub const kCFRunLoopBeforeTimers: CFOptionFlags = 1 << 1; ++pub const kCFRunLoopBeforeSources: CFOptionFlags = 1 << 2; ++pub const kCFRunLoopBeforeWaiting: CFOptionFlags = 1 << 5; ++pub const kCFRunLoopAfterWaiting: CFOptionFlags = 1 << 6; ++pub const kCFRunLoopExit: CFOptionFlags = 1 << 7; ++pub const kCFRunLoopAllActivities: CFOptionFlags = 0x0FFFFFFF; ++ ++#[repr(C)] ++pub struct CFRunLoopSourceContext { ++ pub version: CFIndex, ++ pub info: *mut c_void, ++ pub retain: extern "C" fn (info: *const c_void) -> *const c_void, ++ pub release: extern "C" fn (info: *const c_void), ++ pub copyDescription: extern "C" fn (info: *const c_void) -> CFStringRef, ++ pub equal: extern "C" fn (info1: *const c_void, info2: *const c_void) -> Boolean, ++ pub hash: extern "C" fn (info: *const c_void) -> CFHashCode, ++ pub schedule: extern "C" fn (info: *const c_void, rl: CFRunLoopRef, mode: CFStringRef), ++ pub cancel: extern "C" fn (info: *const c_void, rl: CFRunLoopRef, mode: CFStringRef), ++ pub perform: extern "C" fn (info: *const c_void), ++} ++ ++#[repr(C)] ++pub struct CFRunLoopSourceContext1 { ++ pub version: CFIndex, ++ pub info: *mut c_void, ++ pub retain: extern "C" fn (info: *const c_void) -> *const c_void, ++ pub release: extern "C" fn (info: *const c_void), ++ pub copyDescription: extern "C" fn (info: *const c_void) -> CFStringRef, ++ pub equal: extern "C" fn (info1: *const c_void, info2: *const c_void) -> Boolean, ++ pub hash: extern "C" fn (info: *const c_void) -> CFHashCode, ++ // note that the following two fields are platform dependent in the C header, the ones here are for OS X ++ pub getPort: extern "C" fn (info: *mut c_void) -> mach_port_t, ++ pub perform: extern "C" fn (msg: *mut c_void, size: CFIndex, allocator: CFAllocatorRef, info: *mut c_void) -> *mut c_void, ++} ++ ++#[repr(C)] ++pub struct CFRunLoopObserverContext { ++ pub version: CFIndex, ++ pub info: *mut c_void, ++ pub retain: extern "C" fn (info: *const c_void) -> *const c_void, ++ pub release: extern "C" fn (info: *const c_void), ++ pub copyDescription: extern "C" fn (info: *const c_void) -> CFStringRef, ++} ++ ++pub type CFRunLoopObserverCallBack = extern "C" fn (observer: CFRunLoopObserverRef, activity: CFRunLoopActivity, info: *mut c_void); ++ ++#[repr(C)] ++pub struct CFRunLoopTimerContext { ++ pub version: CFIndex, ++ pub info: *mut c_void, ++ pub retain: extern "C" fn (info: *const c_void) -> *const c_void, ++ pub release: extern "C" fn (info: *const c_void), ++ pub copyDescription: extern "C" fn (info: *const c_void) -> CFStringRef, ++} ++ 
++pub type CFRunLoopTimerCallBack = extern "C" fn (timer: CFRunLoopTimerRef, info: *mut c_void); ++ ++#[repr(C)] ++pub struct __CFRunLoopTimer; ++ ++pub type CFRunLoopTimerRef = *mut __CFRunLoopTimer; ++ ++extern { ++ /* ++ * CFRunLoop.h ++ */ ++ pub static kCFRunLoopDefaultMode: CFStringRef; ++ pub static kCFRunLoopCommonModes: CFStringRef; ++ pub fn CFRunLoopGetTypeID() -> CFTypeID; ++ pub fn CFRunLoopGetCurrent() -> CFRunLoopRef; ++ pub fn CFRunLoopGetMain() -> CFRunLoopRef; ++ pub fn CFRunLoopCopyCurrentMode(rl: CFRunLoopRef) -> CFStringRef; ++ pub fn CFRunLoopCopyAllModes(rl: CFRunLoopRef) -> CFArrayRef; ++ pub fn CFRunLoopAddCommonMode(rl: CFRunLoopRef, mode: CFStringRef); ++ pub fn CFRunLoopGetNextTimerFireDate(rl: CFRunLoopRef, mode: CFStringRef) -> CFAbsoluteTime; ++ pub fn CFRunLoopRun(); ++ pub fn CFRunLoopRunInMode(mode: CFStringRef, seconds: CFTimeInterval, returnAfterSourceHandled: Boolean) -> i32; ++ pub fn CFRunLoopIsWaiting(rl: CFRunLoopRef) -> Boolean; ++ pub fn CFRunLoopWakeUp(rl: CFRunLoopRef); ++ pub fn CFRunLoopStop(rl: CFRunLoopRef); ++ // fn CFRunLoopPerformBlock(rl: CFRunLoopRef, mode: CFTypeRef, block: void (^)(void)); ++ pub fn CFRunLoopContainsSource(rl: CFRunLoopRef, source: CFRunLoopSourceRef, mode: CFStringRef) -> Boolean; ++ pub fn CFRunLoopAddSource(rl: CFRunLoopRef, source: CFRunLoopSourceRef, mode: CFStringRef); ++ pub fn CFRunLoopRemoveSource(rl: CFRunLoopRef, source: CFRunLoopSourceRef, mode: CFStringRef); ++ pub fn CFRunLoopContainsObserver(rl: CFRunLoopRef, observer: CFRunLoopObserverRef, mode: CFStringRef) -> Boolean; ++ pub fn CFRunLoopAddObserver(rl: CFRunLoopRef, observer: CFRunLoopObserverRef, mode: CFStringRef); ++ pub fn CFRunLoopRemoveObserver(rl: CFRunLoopRef, observer: CFRunLoopObserverRef, mode: CFStringRef); ++ pub fn CFRunLoopContainsTimer(rl: CFRunLoopRef, timer: CFRunLoopTimerRef, mode: CFStringRef) -> Boolean; ++ pub fn CFRunLoopAddTimer(rl: CFRunLoopRef, timer: CFRunLoopTimerRef, mode: CFStringRef); ++ pub fn CFRunLoopRemoveTimer(rl: CFRunLoopRef, timer: CFRunLoopTimerRef, mode: CFStringRef); ++ ++ pub fn CFRunLoopSourceGetTypeID() -> CFTypeID; ++ pub fn CFRunLoopSourceCreate(allocator: CFAllocatorRef, order: CFIndex, context: *mut CFRunLoopSourceContext) -> CFRunLoopSourceRef; ++ pub fn CFRunLoopSourceGetOrder(source: CFRunLoopSourceRef) -> CFIndex; ++ pub fn CFRunLoopSourceInvalidate(source: CFRunLoopSourceRef); ++ pub fn CFRunLoopSourceIsValid(source: CFRunLoopSourceRef) -> Boolean; ++ pub fn CFRunLoopSourceGetContext(source: CFRunLoopSourceRef, context: *mut CFRunLoopSourceContext); ++ pub fn CFRunLoopSourceSignal(source: CFRunLoopSourceRef); ++ ++ pub fn CFRunLoopObserverGetTypeID() -> CFTypeID; ++ pub fn CFRunLoopObserverCreate(allocator: CFAllocatorRef, activities: CFOptionFlags, repeats: Boolean, order: CFIndex, callout: CFRunLoopObserverCallBack, context: *mut CFRunLoopObserverContext) -> CFRunLoopObserverRef; ++ // fn CFRunLoopObserverCreateWithHandler(allocator: CFAllocatorRef, activities: CFOptionFlags, repeats: Boolean, order: CFIndex, block: void (^) (CFRunLoopObserverRef observer, CFRunLoopActivity activity)) -> CFRunLoopObserverRef; ++ pub fn CFRunLoopObserverGetActivities(observer: CFRunLoopObserverRef) -> CFOptionFlags; ++ pub fn CFRunLoopObserverDoesRepeat(observer: CFRunLoopObserverRef) -> Boolean; ++ pub fn CFRunLoopObserverGetOrder(observer: CFRunLoopObserverRef) -> CFIndex; ++ pub fn CFRunLoopObserverInvalidate(observer: CFRunLoopObserverRef); ++ pub fn CFRunLoopObserverIsValid(observer: CFRunLoopObserverRef) -> 
Boolean; ++ pub fn CFRunLoopObserverGetContext(observer: CFRunLoopObserverRef, context: *mut CFRunLoopObserverContext); ++ ++ pub fn CFRunLoopTimerGetTypeID() -> CFTypeID; ++ pub fn CFRunLoopTimerCreate(allocator: CFAllocatorRef, fireDate: CFAbsoluteTime, interval: CFTimeInterval, flags: CFOptionFlags, order: CFIndex, callout: CFRunLoopTimerCallBack, context: *mut CFRunLoopTimerContext) -> CFRunLoopTimerRef; ++ // fn CFRunLoopTimerCreateWithHandler(allocator: CFAllocatorRef, fireDate: CFAbsoluteTime, interval: CFTimeInterval, flags: CFOptionFlags, order: CFIndex, block: void (^) (CFRunLoopTimerRef timer)) -> CFRunLoopTimerRef; ++ pub fn CFRunLoopTimerGetNextFireDate(timer: CFRunLoopTimerRef) -> CFAbsoluteTime; ++ pub fn CFRunLoopTimerSetNextFireDate(timer: CFRunLoopTimerRef, fireDate: CFAbsoluteTime); ++ pub fn CFRunLoopTimerGetInterval(timer: CFRunLoopTimerRef) -> CFTimeInterval; ++ pub fn CFRunLoopTimerDoesRepeat(timer: CFRunLoopTimerRef) -> Boolean; ++ pub fn CFRunLoopTimerGetOrder(timer: CFRunLoopTimerRef) -> CFIndex; ++ pub fn CFRunLoopTimerInvalidate(timer: CFRunLoopTimerRef); ++ pub fn CFRunLoopTimerIsValid(timer: CFRunLoopTimerRef) -> Boolean; ++ pub fn CFRunLoopTimerGetContext(timer: CFRunLoopTimerRef, context: *mut CFRunLoopTimerContext); ++ pub fn CFRunLoopTimerGetTolerance(timer: CFRunLoopTimerRef) -> CFTimeInterval; ++ pub fn CFRunLoopTimerSetTolerance(timer: CFRunLoopTimerRef, tolerance: CFTimeInterval); ++} diff --cc vendor/core-foundation-sys-0.6.1/src/set.rs index 000000000,000000000..ec4a4bd69 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/set.rs @@@ -1,0 -1,0 +1,58 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. 
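[Editor's aside; not vendored code. The smallest exercise of the run-loop bindings above: spin the current thread's run loop in the default mode for up to 100 ms. With no sources installed it returns immediately, so the assertion admits either documented outcome. Sketch assumes macOS.]

extern crate core_foundation_sys;

use core_foundation_sys::runloop::{kCFRunLoopDefaultMode, kCFRunLoopRunFinished,
                                   kCFRunLoopRunTimedOut, CFRunLoopRunInMode};

fn main() {
    unsafe {
        // returnAfterSourceHandled is a Boolean (u8); 0 means run until timeout.
        let rc = CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, 0);
        assert!(rc == kCFRunLoopRunFinished || rc == kCFRunLoopRunTimedOut);
    }
}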
++ ++use std::os::raw::c_void; ++ ++use base::{CFAllocatorRef, CFIndex, CFTypeID}; ++ ++pub type CFSetApplierFunction = extern "C" fn (value: *const c_void, ++ context: *const c_void); ++pub type CFSetRetainCallBack = *const u8; ++pub type CFSetReleaseCallBack = *const u8; ++pub type CFSetCopyDescriptionCallBack = *const u8; ++pub type CFSetEqualCallBack = *const u8; ++pub type CFSetHashCallBack = *const u8; ++ ++#[repr(C)] ++#[derive(Clone, Copy)] ++pub struct CFSetCallBacks { ++ pub version: CFIndex, ++ pub retain: CFSetRetainCallBack, ++ pub release: CFSetReleaseCallBack, ++ pub copyDescription: CFSetCopyDescriptionCallBack, ++ pub equal: CFSetEqualCallBack, ++ pub hash: CFSetHashCallBack, ++} ++ ++#[repr(C)] ++pub struct __CFSet(c_void); ++ ++pub type CFSetRef = *const __CFSet; ++ ++extern { ++ /* ++ * CFSet.h ++ */ ++ ++ pub static kCFTypeSetCallBacks: CFSetCallBacks; ++ ++ /* Creating Sets */ ++ pub fn CFSetCreate(allocator: CFAllocatorRef, values: *const *const c_void, numValues: CFIndex, ++ callBacks: *const CFSetCallBacks) -> CFSetRef; ++ ++ /* Applying a Function to Set Members */ ++ pub fn CFSetApplyFunction(theSet: CFSetRef, ++ applier: CFSetApplierFunction, ++ context: *const c_void); ++ ++ pub fn CFSetGetCount(theSet: CFSetRef) -> CFIndex; ++ ++ pub fn CFSetGetTypeID() -> CFTypeID; ++} ++ diff --cc vendor/core-foundation-sys-0.6.1/src/string.rs index 000000000,000000000..a990d9638 new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/string.rs @@@ -1,0 -1,0 +1,319 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++use std::os::raw::{c_char, c_ushort, c_void}; ++ ++use base::{Boolean, CFOptionFlags, CFIndex, CFAllocatorRef, CFRange, CFTypeID}; ++ ++pub type UniChar = c_ushort; ++ ++// CFString.h ++ ++pub type CFStringCompareFlags = CFOptionFlags; ++//static kCFCompareCaseInsensitive: CFStringCompareFlags = 1; ++//static kCFCompareBackwards: CFStringCompareFlags = 4; ++//static kCFCompareAnchored: CFStringCompareFlags = 8; ++//static kCFCompareNonliteral: CFStringCompareFlags = 16; ++//static kCFCompareLocalized: CFStringCompareFlags = 32; ++//static kCFCompareNumerically: CFStringCompareFlags = 64; ++//static kCFCompareDiacriticInsensitive: CFStringCompareFlags = 128; ++//static kCFCompareWidthInsensitive: CFStringCompareFlags = 256; ++//static kCFCompareForcedOrdering: CFStringCompareFlags = 512; ++ ++pub type CFStringEncoding = u32; ++ ++// OS X built-in encodings. 
++ ++//static kCFStringEncodingMacRoman: CFStringEncoding = 0; ++//static kCFStringEncodingWindowsLatin1: CFStringEncoding = 0x0500; ++//static kCFStringEncodingISOLatin1: CFStringEncoding = 0x0201; ++//static kCFStringEncodingNextStepLatin: CFStringEncoding = 0x0B01; ++//static kCFStringEncodingASCII: CFStringEncoding = 0x0600; ++//static kCFStringEncodingUnicode: CFStringEncoding = 0x0100; ++pub static kCFStringEncodingUTF8: CFStringEncoding = 0x08000100; ++//static kCFStringEncodingNonLossyASCII: CFStringEncoding = 0x0BFF; ++ ++//static kCFStringEncodingUTF16: CFStringEncoding = 0x0100; ++//static kCFStringEncodingUTF16BE: CFStringEncoding = 0x10000100; ++//static kCFStringEncodingUTF16LE: CFStringEncoding = 0x14000100; ++//static kCFStringEncodingUTF32: CFStringEncoding = 0x0c000100; ++//static kCFStringEncodingUTF32BE: CFStringEncoding = 0x18000100; ++//static kCFStringEncodingUTF32LE: CFStringEncoding = 0x1c000100; ++ ++ ++// CFStringEncodingExt.h ++ ++pub type CFStringEncodings = CFIndex; ++ ++// External encodings, except those defined above. ++// Defined above: kCFStringEncodingMacRoman = 0 ++//static kCFStringEncodingMacJapanese: CFStringEncoding = 1; ++//static kCFStringEncodingMacChineseTrad: CFStringEncoding = 2; ++//static kCFStringEncodingMacKorean: CFStringEncoding = 3; ++//static kCFStringEncodingMacArabic: CFStringEncoding = 4; ++//static kCFStringEncodingMacHebrew: CFStringEncoding = 5; ++//static kCFStringEncodingMacGreek: CFStringEncoding = 6; ++//static kCFStringEncodingMacCyrillic: CFStringEncoding = 7; ++//static kCFStringEncodingMacDevanagari: CFStringEncoding = 9; ++//static kCFStringEncodingMacGurmukhi: CFStringEncoding = 10; ++//static kCFStringEncodingMacGujarati: CFStringEncoding = 11; ++//static kCFStringEncodingMacOriya: CFStringEncoding = 12; ++//static kCFStringEncodingMacBengali: CFStringEncoding = 13; ++//static kCFStringEncodingMacTamil: CFStringEncoding = 14; ++//static kCFStringEncodingMacTelugu: CFStringEncoding = 15; ++//static kCFStringEncodingMacKannada: CFStringEncoding = 16; ++//static kCFStringEncodingMacMalayalam: CFStringEncoding = 17; ++//static kCFStringEncodingMacSinhalese: CFStringEncoding = 18; ++//static kCFStringEncodingMacBurmese: CFStringEncoding = 19; ++//static kCFStringEncodingMacKhmer: CFStringEncoding = 20; ++//static kCFStringEncodingMacThai: CFStringEncoding = 21; ++//static kCFStringEncodingMacLaotian: CFStringEncoding = 22; ++//static kCFStringEncodingMacGeorgian: CFStringEncoding = 23; ++//static kCFStringEncodingMacArmenian: CFStringEncoding = 24; ++//static kCFStringEncodingMacChineseSimp: CFStringEncoding = 25; ++//static kCFStringEncodingMacTibetan: CFStringEncoding = 26; ++//static kCFStringEncodingMacMongolian: CFStringEncoding = 27; ++//static kCFStringEncodingMacEthiopic: CFStringEncoding = 28; ++//static kCFStringEncodingMacCentralEurRoman: CFStringEncoding = 29; ++//static kCFStringEncodingMacVietnamese: CFStringEncoding = 30; ++//static kCFStringEncodingMacExtArabic: CFStringEncoding = 31; ++//static kCFStringEncodingMacSymbol: CFStringEncoding = 33; ++//static kCFStringEncodingMacDingbats: CFStringEncoding = 34; ++//static kCFStringEncodingMacTurkish: CFStringEncoding = 35; ++//static kCFStringEncodingMacCroatian: CFStringEncoding = 36; ++//static kCFStringEncodingMacIcelandic: CFStringEncoding = 37; ++//static kCFStringEncodingMacRomanian: CFStringEncoding = 38; ++//static kCFStringEncodingMacCeltic: CFStringEncoding = 39; ++//static kCFStringEncodingMacGaelic: CFStringEncoding = 40; ++//static 
kCFStringEncodingMacFarsi: CFStringEncoding = 0x8C; ++//static kCFStringEncodingMacUkrainian: CFStringEncoding = 0x98; ++//static kCFStringEncodingMacInuit: CFStringEncoding = 0xEC; ++//static kCFStringEncodingMacVT100: CFStringEncoding = 0xFC; ++//static kCFStringEncodingMacHFS: CFStringEncoding = 0xFF; ++// Defined above: kCFStringEncodingISOLatin1 = 0x0201 ++//static kCFStringEncodingISOLatin2: CFStringEncoding = 0x0202; ++//static kCFStringEncodingISOLatin3: CFStringEncoding = 0x0203; ++//static kCFStringEncodingISOLatin4: CFStringEncoding = 0x0204; ++//static kCFStringEncodingISOLatinCyrillic: CFStringEncoding = 0x0205; ++//static kCFStringEncodingISOLatinArabic: CFStringEncoding = 0x0206; ++//static kCFStringEncodingISOLatinGreek: CFStringEncoding = 0x0207; ++//static kCFStringEncodingISOLatinHebrew: CFStringEncoding = 0x0208; ++//static kCFStringEncodingISOLatin5: CFStringEncoding = 0x0209; ++//static kCFStringEncodingISOLatin6: CFStringEncoding = 0x020A; ++//static kCFStringEncodingISOLatinThai: CFStringEncoding = 0x020B; ++//static kCFStringEncodingISOLatin7: CFStringEncoding = 0x020D; ++//static kCFStringEncodingISOLatin8: CFStringEncoding = 0x020E; ++//static kCFStringEncodingISOLatin9: CFStringEncoding = 0x020F; ++//static kCFStringEncodingISOLatin10: CFStringEncoding = 0x0210; ++//static kCFStringEncodingDOSLatinUS: CFStringEncoding = 0x0400; ++//static kCFStringEncodingDOSGreek: CFStringEncoding = 0x0405; ++//static kCFStringEncodingDOSBalticRim: CFStringEncoding = 0x0406; ++//static kCFStringEncodingDOSLatin1: CFStringEncoding = 0x0410; ++//static kCFStringEncodingDOSGreek1: CFStringEncoding = 0x0411; ++//static kCFStringEncodingDOSLatin2: CFStringEncoding = 0x0412; ++//static kCFStringEncodingDOSCyrillic: CFStringEncoding = 0x0413; ++//static kCFStringEncodingDOSTurkish: CFStringEncoding = 0x0414; ++//static kCFStringEncodingDOSPortuguese: CFStringEncoding = 0x0415; ++//static kCFStringEncodingDOSIcelandic: CFStringEncoding = 0x0416; ++//static kCFStringEncodingDOSHebrew: CFStringEncoding = 0x0417; ++//static kCFStringEncodingDOSCanadianFrench: CFStringEncoding = 0x0418; ++//static kCFStringEncodingDOSArabic: CFStringEncoding = 0x0419; ++//static kCFStringEncodingDOSNordic: CFStringEncoding = 0x041A; ++//static kCFStringEncodingDOSRussian: CFStringEncoding = 0x041B; ++//static kCFStringEncodingDOSGreek2: CFStringEncoding = 0x041C; ++//static kCFStringEncodingDOSThai: CFStringEncoding = 0x041D; ++//static kCFStringEncodingDOSJapanese: CFStringEncoding = 0x0420; ++//static kCFStringEncodingDOSChineseSimplif: CFStringEncoding = 0x0421; ++//static kCFStringEncodingDOSKorean: CFStringEncoding = 0x0422; ++//static kCFStringEncodingDOSChineseTrad: CFStringEncoding = 0x0423; ++// Defined above: kCFStringEncodingWindowsLatin1 = 0x0500 ++//static kCFStringEncodingWindowsLatin2: CFStringEncoding = 0x0501; ++//static kCFStringEncodingWindowsCyrillic: CFStringEncoding = 0x0502; ++//static kCFStringEncodingWindowsGreek: CFStringEncoding = 0x0503; ++//static kCFStringEncodingWindowsLatin5: CFStringEncoding = 0x0504; ++//static kCFStringEncodingWindowsHebrew: CFStringEncoding = 0x0505; ++//static kCFStringEncodingWindowsArabic: CFStringEncoding = 0x0506; ++//static kCFStringEncodingWindowsBalticRim: CFStringEncoding = 0x0507; ++//static kCFStringEncodingWindowsVietnamese: CFStringEncoding = 0x0508; ++//static kCFStringEncodingWindowsKoreanJohab: CFStringEncoding = 0x0510; ++// Defined above: kCFStringEncodingASCII = 0x0600 ++//static kCFStringEncodingANSEL: CFStringEncoding = 0x0601; 
++//static kCFStringEncodingJIS_X0201_76: CFStringEncoding = 0x0620; ++//static kCFStringEncodingJIS_X0208_83: CFStringEncoding = 0x0621; ++//static kCFStringEncodingJIS_X0208_90: CFStringEncoding = 0x0622; ++//static kCFStringEncodingJIS_X0212_90: CFStringEncoding = 0x0623; ++//static kCFStringEncodingJIS_C6226_78: CFStringEncoding = 0x0624; ++//static kCFStringEncodingShiftJIS_X0213: CFStringEncoding = 0x0628; ++//static kCFStringEncodingShiftJIS_X0213_MenKuTen: CFStringEncoding = 0x0629; ++//static kCFStringEncodingGB_2312_80: CFStringEncoding = 0x0630; ++//static kCFStringEncodingGBK_95: CFStringEncoding = 0x0631; ++//static kCFStringEncodingGB_18030_2000: CFStringEncoding = 0x0632; ++//static kCFStringEncodingKSC_5601_87: CFStringEncoding = 0x0640; ++//static kCFStringEncodingKSC_5601_92_Johab: CFStringEncoding = 0x0641; ++//static kCFStringEncodingCNS_11643_92_P1: CFStringEncoding = 0x0651; ++//static kCFStringEncodingCNS_11643_92_P2: CFStringEncoding = 0x0652; ++//static kCFStringEncodingCNS_11643_92_P3: CFStringEncoding = 0x0653; ++//static kCFStringEncodingISO_2022_JP: CFStringEncoding = 0x0820; ++//static kCFStringEncodingISO_2022_JP_2: CFStringEncoding = 0x0821; ++//static kCFStringEncodingISO_2022_JP_1: CFStringEncoding = 0x0822; ++//static kCFStringEncodingISO_2022_JP_3: CFStringEncoding = 0x0823; ++//static kCFStringEncodingISO_2022_CN: CFStringEncoding = 0x0830; ++//static kCFStringEncodingISO_2022_CN_EXT: CFStringEncoding = 0x0831; ++//static kCFStringEncodingISO_2022_KR: CFStringEncoding = 0x0840; ++//static kCFStringEncodingEUC_JP: CFStringEncoding = 0x0920; ++//static kCFStringEncodingEUC_CN: CFStringEncoding = 0x0930; ++//static kCFStringEncodingEUC_TW: CFStringEncoding = 0x0931; ++//static kCFStringEncodingEUC_KR: CFStringEncoding = 0x0940; ++//static kCFStringEncodingShiftJIS: CFStringEncoding = 0x0A01; ++//static kCFStringEncodingKOI8_R: CFStringEncoding = 0x0A02; ++//static kCFStringEncodingBig5: CFStringEncoding = 0x0A03; ++//static kCFStringEncodingMacRomanLatin1: CFStringEncoding = 0x0A04; ++//static kCFStringEncodingHZ_GB_2312: CFStringEncoding = 0x0A05; ++//static kCFStringEncodingBig5_HKSCS_1999: CFStringEncoding = 0x0A06; ++//static kCFStringEncodingVISCII: CFStringEncoding = 0x0A07; ++//static kCFStringEncodingKOI8_U: CFStringEncoding = 0x0A08; ++//static kCFStringEncodingBig5_E: CFStringEncoding = 0x0A09; ++// Defined above: kCFStringEncodingNextStepLatin = 0x0B01 ++//static kCFStringEncodingNextStepJapanese: CFStringEncoding = 0x0B02; ++//static kCFStringEncodingEBCDIC_US: CFStringEncoding = 0x0C01; ++//static kCFStringEncodingEBCDIC_CP037: CFStringEncoding = 0x0C02; ++//static kCFStringEncodingUTF7: CFStringEncoding = 0x04000100; ++//static kCFStringEncodingUTF7_IMAP: CFStringEncoding = 0x0A10; ++//static kCFStringEncodingShiftJIS_X0213_00: CFStringEncoding = 0x0628; /* Deprecated */ ++ ++#[repr(C)] ++pub struct __CFString(c_void); ++ ++pub type CFStringRef = *const __CFString; ++ ++extern { ++ /* ++ * CFString.h ++ */ ++ ++ // N.B. 
organized according to "Functions by task" in docs ++ ++ /* Creating a CFString */ ++ //fn CFSTR ++ //fn CFStringCreateArrayBySeparatingStrings ++ //fn CFStringCreateByCombiningStrings ++ //fn CFStringCreateCopy ++ //fn CFStringCreateFromExternalRepresentation ++ pub fn CFStringCreateWithBytes(alloc: CFAllocatorRef, ++ bytes: *const u8, ++ numBytes: CFIndex, ++ encoding: CFStringEncoding, ++ isExternalRepresentation: Boolean) ++ -> CFStringRef; ++ pub fn CFStringCreateWithBytesNoCopy(alloc: CFAllocatorRef, ++ bytes: *const u8, ++ numBytes: CFIndex, ++ encoding: CFStringEncoding, ++ isExternalRepresentation: Boolean, ++ contentsDeallocator: CFAllocatorRef) ++ -> CFStringRef; ++ //fn CFStringCreateWithCharacters ++ //fn CFStringCreateWithCharactersNoCopy ++ pub fn CFStringCreateWithCString(alloc: CFAllocatorRef, ++ cStr: *const c_char, ++ encoding: CFStringEncoding) ++ -> CFStringRef; ++ //fn CFStringCreateWithCStringNoCopy ++ //fn CFStringCreateWithFormat ++ //fn CFStringCreateWithFormatAndArguments ++ //fn CFStringCreateWithPascalString ++ //fn CFStringCreateWithPascalStringNoCopy ++ //fn CFStringCreateWithSubstring ++ ++ /* Searching Strings */ ++ //fn CFStringCreateArrayWithFindResults ++ //fn CFStringFind ++ //fn CFStringFindCharacterFromSet ++ //fn CFStringFindWithOptions ++ //fn CFStringFindWithOptionsAndLocale ++ //fn CFStringGetLineBounds ++ ++ /* Comparing Strings */ ++ //fn CFStringCompare ++ //fn CFStringCompareWithOptions ++ //fn CFStringCompareWithOptionsAndLocale ++ //fn CFStringHasPrefix ++ //fn CFStringHasSuffix ++ ++ /* Accessing Characters */ ++ //fn CFStringCreateExternalRepresentation ++ pub fn CFStringGetBytes(theString: CFStringRef, ++ range: CFRange, ++ encoding: CFStringEncoding, ++ lossByte: u8, ++ isExternalRepresentation: Boolean, ++ buffer: *mut u8, ++ maxBufLen: CFIndex, ++ usedBufLen: *mut CFIndex) ++ -> CFIndex; ++ //fn CFStringGetCharacterAtIndex ++ //fn CFStringGetCharacters ++ //fn CFStringGetCharactersPtr ++ //fn CFStringGetCharacterFromInlineBuffer ++ pub fn CFStringGetCString(theString: CFStringRef, ++ buffer: *mut c_char, ++ bufferSize: CFIndex, ++ encoding: CFStringEncoding) ++ -> Boolean; ++ pub fn CFStringGetCStringPtr(theString: CFStringRef, ++ encoding: CFStringEncoding) ++ -> *const c_char; ++ pub fn CFStringGetLength(theString: CFStringRef) -> CFIndex; ++ //fn CFStringGetPascalString ++ //fn CFStringGetPascalStringPtr ++ //fn CFStringGetRangeOfComposedCharactersAtIndex ++ //fn CFStringInitInlineBuffer ++ ++ /* Working With Hyphenation */ ++ //fn CFStringGetHyphenationLocationBeforeIndex ++ //fn CFStringIsHyphenationAvailableForLocale ++ ++ /* Working With Encodings */ ++ //fn CFStringConvertEncodingToIANACharSetName ++ //fn CFStringConvertEncodingToNSStringEncoding ++ //fn CFStringConvertEncodingToWindowsCodepage ++ //fn CFStringConvertIANACharSetNameToEncoding ++ //fn CFStringConvertNSStringEncodingToEncoding ++ //fn CFStringConvertWindowsCodepageToEncoding ++ //fn CFStringGetFastestEncoding ++ //fn CFStringGetListOfAvailableEncodings ++ //fn CFStringGetMaximumSizeForEncoding ++ //fn CFStringGetMostCompatibleMacStringEncoding ++ //fn CFStringGetNameOfEncoding ++ //fn CFStringGetSmallestEncoding ++ //fn CFStringGetSystemEncoding ++ //fn CFStringIsEncodingAvailable ++ ++ /* Getting Numeric Values */ ++ //fn CFStringGetDoubleValue ++ //fn CFStringGetIntValue ++ ++ /* Getting String Properties */ ++ //fn CFShowStr ++ pub fn CFStringGetTypeID() -> CFTypeID; ++ ++ /* String File System Representations */ ++ //fn 
CFStringCreateWithFileSystemRepresentation ++ //fn CFStringGetFileSystemRepresentation ++ //fn CFStringGetMaximumSizeOfFileSystemRepresentation ++ ++ /* Getting Paragraph Bounds */ ++ //fn CFStringGetParagraphBounds ++ ++ /* Managing Surrogates */ ++ //fn CFStringGetLongCharacterForSurrogatePair ++ //fn CFStringGetSurrogatePairForLongCharacter ++ //fn CFStringIsSurrogateHighCharacter ++ //fn CFStringIsSurrogateLowCharacter ++} diff --cc vendor/core-foundation-sys-0.6.1/src/timezone.rs index 000000000,000000000..376cfdcde new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/timezone.rs @@@ -1,0 -1,0 +1,27 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++use std::os::raw::c_void; ++ ++use base::{CFAllocatorRef, CFTypeID}; ++use date::{CFTimeInterval, CFAbsoluteTime}; ++ ++#[repr(C)] ++pub struct __CFTimeZone(c_void); ++ ++pub type CFTimeZoneRef = *const __CFTimeZone; ++ ++extern { ++ pub fn CFTimeZoneCopySystem() -> CFTimeZoneRef; ++ pub fn CFTimeZoneCopyDefault() -> CFTimeZoneRef; ++ pub fn CFTimeZoneCreateWithTimeIntervalFromGMT(allocator: CFAllocatorRef, interval: CFTimeInterval) -> CFTimeZoneRef; ++ pub fn CFTimeZoneGetSecondsFromGMT(tz: CFTimeZoneRef, time: CFAbsoluteTime) -> CFTimeInterval; ++ ++ pub fn CFTimeZoneGetTypeID() -> CFTypeID; ++} diff --cc vendor/core-foundation-sys-0.6.1/src/url.rs index 000000000,000000000..9c4bc3e4b new file mode 100644 --- /dev/null +++ b/vendor/core-foundation-sys-0.6.1/src/url.rs @@@ -1,0 -1,0 +1,164 @@@ ++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++use std::os::raw::c_void; ++ ++use base::{CFOptionFlags, CFIndex, CFAllocatorRef, Boolean, CFTypeID, CFTypeRef, SInt32}; ++use string::{CFStringRef, CFStringEncoding}; ++use error::CFErrorRef; ++ ++#[repr(C)] ++pub struct __CFURL(c_void); ++ ++pub type CFURLRef = *const __CFURL; ++ ++pub type CFURLBookmarkCreationOptions = CFOptionFlags; ++ ++pub type CFURLPathStyle = CFIndex; ++ ++/* typedef CF_ENUM(CFIndex, CFURLPathStyle) */ ++pub const kCFURLPOSIXPathStyle: CFURLPathStyle = 0; ++pub const kCFURLHFSPathStyle: CFURLPathStyle = 1; ++pub const kCFURLWindowsPathStyle: CFURLPathStyle = 2; ++ ++// static kCFURLBookmarkCreationPreferFileIDResolutionMask: CFURLBookmarkCreationOptions = ++// (1 << 8) as u32; ++// static kCFURLBookmarkCreationMinimalBookmarkMask: CFURLBookmarkCreationOptions = ++// (1 << 9) as u32; ++// static kCFURLBookmarkCreationSuitableForBookmarkFile: CFURLBookmarkCreationOptions = ++// (1 << 10) as u32; ++// static kCFURLBookmarkCreationWithSecurityScope: CFURLBookmarkCreationOptions = ++// (1 << 11) as u32; ++// static kCFURLBookmarkCreationSecurityScopeAllowOnlyReadAccess: CFURLBookmarkCreationOptions = ++// (1 << 12) as u32; ++ ++// TODO: there are a lot of missing keys and constants. Add if you are bored or need them. 
++ ++extern { ++ /* ++ * CFURL.h ++ */ ++ ++ /* Common File System Resource Keys */ ++ pub static kCFURLAttributeModificationDateKey: CFStringRef; ++ pub static kCFURLContentAccessDateKey: CFStringRef; ++ pub static kCFURLContentModificationDateKey: CFStringRef; ++ pub static kCFURLCreationDateKey: CFStringRef; ++ pub static kCFURLFileResourceIdentifierKey: CFStringRef; ++ pub static kCFURLFileSecurityKey: CFStringRef; ++ pub static kCFURLHasHiddenExtensionKey: CFStringRef; ++ pub static kCFURLIsDirectoryKey: CFStringRef; ++ pub static kCFURLIsExecutableKey: CFStringRef; ++ pub static kCFURLIsHiddenKey: CFStringRef; ++ pub static kCFURLIsPackageKey: CFStringRef; ++ pub static kCFURLIsReadableKey: CFStringRef; ++ pub static kCFURLIsRegularFileKey: CFStringRef; ++ pub static kCFURLIsSymbolicLinkKey: CFStringRef; ++ pub static kCFURLIsSystemImmutableKey: CFStringRef; ++ pub static kCFURLIsUserImmutableKey: CFStringRef; ++ pub static kCFURLIsVolumeKey: CFStringRef; ++ pub static kCFURLIsWritableKey: CFStringRef; ++ pub static kCFURLLabelNumberKey: CFStringRef; ++ pub static kCFURLLinkCountKey: CFStringRef; ++ pub static kCFURLLocalizedLabelKey: CFStringRef; ++ pub static kCFURLLocalizedNameKey: CFStringRef; ++ pub static kCFURLLocalizedTypeDescriptionKey: CFStringRef; ++ pub static kCFURLNameKey: CFStringRef; ++ pub static kCFURLParentDirectoryURLKey: CFStringRef; ++ pub static kCFURLPreferredIOBlockSizeKey: CFStringRef; ++ pub static kCFURLTypeIdentifierKey: CFStringRef; ++ pub static kCFURLVolumeIdentifierKey: CFStringRef; ++ pub static kCFURLVolumeURLKey: CFStringRef; ++ ++ #[cfg(feature="mac_os_10_8_features")] ++ #[cfg_attr(feature = "mac_os_10_7_support", linkage = "extern_weak")] ++ pub static kCFURLIsExcludedFromBackupKey: CFStringRef; ++ pub static kCFURLFileResourceTypeKey: CFStringRef; ++ ++ /* Creating a CFURL */ ++ pub fn CFURLCopyAbsoluteURL(anURL: CFURLRef) -> CFURLRef; ++ //fn CFURLCreateAbsoluteURLWithBytes ++ //fn CFURLCreateByResolvingBookmarkData ++ //fn CFURLCreateCopyAppendingPathComponent ++ //fn CFURLCreateCopyAppendingPathExtension ++ //fn CFURLCreateCopyDeletingLastPathComponent ++ //fn CFURLCreateCopyDeletingPathExtension ++ pub fn CFURLCreateFilePathURL(allocator: CFAllocatorRef, url: CFURLRef, error: *mut CFErrorRef) -> CFURLRef; ++ //fn CFURLCreateFileReferenceURL ++ pub fn CFURLCreateFromFileSystemRepresentation(allocator: CFAllocatorRef, buffer: *const u8, bufLen: CFIndex, isDirectory: Boolean) -> CFURLRef; ++ //fn CFURLCreateFromFileSystemRepresentationRelativeToBase ++ //fn CFURLCreateFromFSRef ++ pub fn CFURLCreateWithBytes(allocator: CFAllocatorRef, URLBytes: *const u8, length: CFIndex, encoding: CFStringEncoding, baseURL: CFURLRef) -> CFURLRef; ++ pub fn CFURLCreateWithFileSystemPath(allocator: CFAllocatorRef, filePath: CFStringRef, pathStyle: CFURLPathStyle, isDirectory: Boolean) -> CFURLRef; ++ pub fn CFURLCreateWithFileSystemPathRelativeToBase(allocator: CFAllocatorRef, filePath: CFStringRef, pathStyle: CFURLPathStyle, isDirectory: Boolean, baseURL: CFURLRef) -> CFURLRef; ++ //fn CFURLCreateWithString(allocator: CFAllocatorRef, urlString: CFStringRef, ++ // baseURL: CFURLRef) -> CFURLRef; ++ ++ /* Accessing the Parts of a URL */ ++ pub fn CFURLCanBeDecomposed(anURL: CFURLRef) -> Boolean; ++ pub fn CFURLCopyFileSystemPath(anURL: CFURLRef, pathStyle: CFURLPathStyle) -> CFStringRef; ++ pub fn CFURLCopyFragment(anURL: CFURLRef, charactersToLeaveEscaped: CFStringRef) -> CFStringRef; ++ pub fn CFURLCopyHostName(anURL: CFURLRef) -> CFStringRef; ++ pub fn 
CFURLCopyLastPathComponent(anURL: CFURLRef) -> CFStringRef;
++    pub fn CFURLCopyNetLocation(anURL: CFURLRef) -> CFStringRef;
++    pub fn CFURLCopyParameterString(anURL: CFURLRef, charactersToLeaveEscaped: CFStringRef) -> CFStringRef;
++    pub fn CFURLCopyPassword(anURL: CFURLRef) -> CFStringRef;
++    pub fn CFURLCopyPath(anURL: CFURLRef) -> CFStringRef;
++    pub fn CFURLCopyPathExtension(anURL: CFURLRef) -> CFStringRef;
++    pub fn CFURLCopyQueryString(anURL: CFURLRef, charactersToLeaveEscaped: CFStringRef) -> CFStringRef;
++    pub fn CFURLCopyResourceSpecifier(anURL: CFURLRef) -> CFStringRef;
++    pub fn CFURLCopyScheme(anURL: CFURLRef) -> CFStringRef;
++    pub fn CFURLCopyStrictPath(anURL: CFURLRef, isAbsolute: *mut Boolean) -> CFStringRef;
++    pub fn CFURLCopyUserName(anURL: CFURLRef) -> CFStringRef;
++    pub fn CFURLGetPortNumber(anURL: CFURLRef) -> SInt32;
++    pub fn CFURLHasDirectoryPath(anURL: CFURLRef) -> Boolean;
++
++    /* Converting URLs to Other Representations */
++    //fn CFURLCreateData(allocator: CFAllocatorRef, url: CFURLRef,
++    //                   encoding: CFStringEncoding, escapeWhitespace: bool) -> CFDataRef;
++    //fn CFURLCreateStringByAddingPercentEscapes
++    //fn CFURLCreateStringByReplacingPercentEscapes
++    //fn CFURLCreateStringByReplacingPercentEscapesUsingEncoding
++    pub fn CFURLGetFileSystemRepresentation(anURL: CFURLRef, resolveAgainstBase: Boolean, buffer: *mut u8, maxBufLen: CFIndex) -> Boolean;
++
++    //fn CFURLGetFSRef
++    pub fn CFURLGetString(anURL: CFURLRef) -> CFStringRef;
++
++    /* Getting URL Properties */
++    //fn CFURLGetBaseURL(anURL: CFURLRef) -> CFURLRef;
++    pub fn CFURLGetBytes(anURL: CFURLRef, buffer: *mut u8, bufferLength: CFIndex) -> CFIndex;
++    //fn CFURLGetByteRangeForComponent
++    pub fn CFURLGetTypeID() -> CFTypeID;
++    //fn CFURLResourceIsReachable
++
++    /* Getting and Setting File System Resource Properties */
++    pub fn CFURLClearResourcePropertyCache(url: CFURLRef);
++    //fn CFURLClearResourcePropertyCacheForKey
++    //fn CFURLCopyResourcePropertiesForKeys
++    //fn CFURLCopyResourcePropertyForKey
++    //fn CFURLCreateResourcePropertiesForKeysFromBookmarkData
++    //fn CFURLCreateResourcePropertyForKeyFromBookmarkData
++    //fn CFURLSetResourcePropertiesForKeys
++    pub fn CFURLSetResourcePropertyForKey(url: CFURLRef, key: CFStringRef, value: CFTypeRef, error: *mut CFErrorRef) -> Boolean;
++    //fn CFURLSetTemporaryResourcePropertyForKey
++
++    /* Working with Bookmark Data */
++    //fn CFURLCreateBookmarkData
++    //fn CFURLCreateBookmarkDataFromAliasRecord
++    //fn CFURLCreateBookmarkDataFromFile
++    //fn CFURLWriteBookmarkDataToFile
++    //fn CFURLStartAccessingSecurityScopedResource
++    //fn CFURLStopAccessingSecurityScopedResource
++}
++
++#[test]
++#[cfg(feature="mac_os_10_8_features")]
++fn can_see_excluded_from_backup_key() {
++    let _ = unsafe { kCFURLIsExcludedFromBackupKey };
++}
diff --cc vendor/core-foundation-sys-0.6.1/src/uuid.rs
index 000000000,000000000..425395293
new file mode 100644
--- /dev/null
+++ b/vendor/core-foundation-sys-0.6.1/src/uuid.rs
@@@ -1,0 -1,0 +1,49 @@@
++// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
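++
++// A usage sketch, not part of the upstream file: `CFUUIDCreate` (declared
++// below) allocates a new UUID object and `CFUUIDGetUUIDBytes` copies out its
++// sixteen raw bytes (`byte0`..`byte15`). The object is leaked here for
++// brevity; a real caller would release it via CoreFoundation.
++#[cfg(all(test, target_os = "macos"))]
++#[test]
++fn uuid_bytes_sketch() {
++    use std::ptr;
++    unsafe {
++        // A null allocator selects the default CFAllocator.
++        let uuid = CFUUIDCreate(ptr::null());
++        assert!(!uuid.is_null());
++        let bytes = CFUUIDGetUUIDBytes(uuid);
++        let _first = bytes.byte0;
++    }
++}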
++ ++use std::os::raw::c_void; ++ ++use base::{CFAllocatorRef, CFTypeID}; ++ ++#[repr(C)] ++pub struct __CFUUID(c_void); ++ ++pub type CFUUIDRef = *const __CFUUID; ++ ++#[repr(C)] ++#[derive(Clone, Copy, Default)] ++pub struct CFUUIDBytes { ++ pub byte0: u8, ++ pub byte1: u8, ++ pub byte2: u8, ++ pub byte3: u8, ++ pub byte4: u8, ++ pub byte5: u8, ++ pub byte6: u8, ++ pub byte7: u8, ++ pub byte8: u8, ++ pub byte9: u8, ++ pub byte10: u8, ++ pub byte11: u8, ++ pub byte12: u8, ++ pub byte13: u8, ++ pub byte14: u8, ++ pub byte15: u8 ++} ++ ++extern { ++ /* ++ * CFUUID.h ++ */ ++ pub fn CFUUIDCreate(allocator: CFAllocatorRef) -> CFUUIDRef; ++ pub fn CFUUIDCreateFromUUIDBytes(allocator: CFAllocatorRef, bytes: CFUUIDBytes) -> CFUUIDRef; ++ pub fn CFUUIDGetUUIDBytes(uuid: CFUUIDRef) -> CFUUIDBytes; ++ ++ pub fn CFUUIDGetTypeID() -> CFTypeID; ++} diff --cc vendor/curl-0.4.14/.cargo-checksum.json index 000000000,000000000..c4aa0761b new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"444c2f9e71458b34e75471ed8d756947a0bb920b8b8b9bfc56dfcc4fc6819a13"} diff --cc vendor/curl-0.4.14/.gitmodules index 000000000,000000000..7196785dd new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/.gitmodules @@@ -1,0 -1,0 +1,3 @@@ ++[submodule "curl-sys/curl"] ++ path = curl-sys/curl ++ url = https://github.com/alexcrichton/curl diff --cc vendor/curl-0.4.14/.travis.yml index 000000000,000000000..d0b3a1f15 new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/.travis.yml @@@ -1,0 -1,0 +1,69 @@@ ++language: rust ++sudo: required ++dist: trusty ++services: ++ - docker ++ ++matrix: ++ include: ++ - os: linux ++ rust: stable ++ env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 NO_ADD=1 ++ - os: linux ++ rust: stable ++ env: TARGET=i686-unknown-linux-gnu DOCKER=linux32 ++ - os: linux ++ rust: stable ++ env: TARGET=x86_64-unknown-linux-musl DOCKER=musl ++ - os: linux ++ rust: stable ++ env: TARGET=x86_64-pc-windows-gnu NO_RUN=1 DOCKER=mingw ++ - os: linux ++ rust: stable ++ env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64-curl NO_ADD=1 ++ - os: osx ++ rust: stable ++ env: TARGET=x86_64-apple-darwin NO_ADD=1 ++ - os: osx ++ rust: stable ++ env: TARGET=i686-apple-darwin ++ - os: linux ++ rust: beta ++ env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 NO_ADD=1 ++ - os: linux ++ rust: nightly ++ env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 NO_ADD=1 ++ before_script: ++ - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH ++ after_success: ++ - travis-cargo doc-upload ++sudo: false ++install: ++ - if [ -z "$NO_ADD" ]; then rustup target add $TARGET; fi ++script: ++ - curl --version ++ - cargo generate-lockfile ++ - cargo generate-lockfile --manifest-path systest/Cargo.toml ++ - if [ -z "$DOCKER" ]; then ++ sh ci/run.sh; ++ else ++ mkdir .cargo target; ++ docker build -t rust -f ci/Dockerfile-$DOCKER ci; ++ docker run ++ -w /src ++ -v `pwd`:/src:ro ++ -v `pwd`/target:/src/target ++ -v `pwd`/ci/.cargo:/src/.cargo:ro ++ -v `rustc --print sysroot`:/usr/local:ro ++ -e TARGET=$TARGET ++ -e NO_RUN=$NO_RUN ++ -e CARGO_TARGET_DIR=/src/target ++ -it rust ++ sh ci/run.sh; ++ fi ++notifications: ++ email: ++ on_success: never ++env: ++ global: ++ secure: "j4son34/PmqogLMUHgcvOk+XtyUtcd0aAA8Sa/h4pyupw8AEM7+5DMMIrcrRh7ieKqmL2RSSGnYtYbd2b5yYroudypsqmQhK0StzrtPaftl/8zxw8liXzA9rat8MP0vuEAe5w9KLRdFKUCU7TzcYXcKttpbavqdNsJae+OFzHJc=" diff --cc vendor/curl-0.4.14/Cargo.toml index 000000000,000000000..9a6c5c659 new file mode 
100644 --- /dev/null +++ b/vendor/curl-0.4.14/Cargo.toml @@@ -1,0 -1,0 +1,52 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "curl" ++version = "0.4.14" ++authors = ["Alex Crichton "] ++description = "Rust bindings to libcurl for making HTTP requests" ++homepage = "https://github.com/alexcrichton/curl-rust" ++documentation = "https://docs.rs/curl" ++categories = ["api-bindings", "web-programming::http-client"] ++license = "MIT" ++repository = "https://github.com/alexcrichton/curl-rust" ++[dependencies.curl-sys] ++version = "0.4.8" ++ ++[dependencies.libc] ++version = "0.2.42" ++ ++[dependencies.socket2] ++version = "0.3.7" ++[dev-dependencies.mio] ++version = "0.6" ++ ++[dev-dependencies.mio-extras] ++version = "2.0.3" ++[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-probe] ++version = "0.1.2" ++ ++[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-sys] ++version = "0.9.33" ++[target."cfg(target_env=\"msvc\")".dependencies.kernel32-sys] ++version = "0.2.2" ++ ++[target."cfg(target_env=\"msvc\")".dependencies.schannel] ++version = "0.1.13" ++[target."cfg(windows)".dependencies.winapi] ++version = "0.2.7" ++[badges.appveyor] ++repository = "alexcrichton/curl-rust" ++ ++[badges.travis-ci] ++repository = "alexcrichton/curl-rust" diff --cc vendor/curl-0.4.14/LICENSE index 000000000,000000000..5f5e4b09d new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/LICENSE @@@ -1,0 -1,0 +1,19 @@@ ++Copyright (c) 2014 Carl Lerche ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. 
diff --cc vendor/curl-0.4.14/README.md index 000000000,000000000..b458c3b11 new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/README.md @@@ -1,0 -1,0 +1,138 @@@ ++# curl-rust ++ ++libcurl bindings for Rust ++ ++[![Build Status](https://travis-ci.org/alexcrichton/curl-rust.svg?branch=master)](https://travis-ci.org/alexcrichton/curl-rust) ++[![Build status](https://ci.appveyor.com/api/projects/status/lx98wtbxhhhajpr9?svg=true)](https://ci.appveyor.com/project/alexcrichton/curl-rust) ++ ++[Documentation](https://docs.rs/curl) ++ ++## Quick Start ++ ++```rust ++extern crate curl; ++ ++use std::io::{stdout, Write}; ++ ++use curl::easy::Easy; ++ ++// Print a web page onto stdout ++fn main() { ++ let mut easy = Easy::new(); ++ easy.url("https://www.rust-lang.org/").unwrap(); ++ easy.write_function(|data| { ++ stdout().write_all(data).unwrap(); ++ Ok(data.len()) ++ }).unwrap(); ++ easy.perform().unwrap(); ++ ++ println!("{}", easy.response_code().unwrap()); ++} ++``` ++ ++```rust ++extern crate curl; ++ ++use curl::easy::Easy; ++ ++// Capture output into a local `Vec`. ++fn main() { ++ let mut dst = Vec::new(); ++ let mut easy = Easy::new(); ++ easy.url("https://www.rust-lang.org/").unwrap(); ++ ++ let mut transfer = easy.transfer(); ++ transfer.write_function(|data| { ++ dst.extend_from_slice(data); ++ Ok(data.len()) ++ }).unwrap(); ++ transfer.perform().unwrap(); ++} ++``` ++ ++## Post / Put requests ++ ++The `put` and `post` methods on `Easy` can configure the method of the HTTP ++request, and then `read_function` can be used to specify how data is filled in. ++This interface works particularly well with types that implement `Read`. ++ ++```rust,no_run ++extern crate curl; ++ ++use std::io::Read; ++use curl::easy::Easy; ++ ++fn main() { ++ let mut data = "this is the body".as_bytes(); ++ ++ let mut easy = Easy::new(); ++ easy.url("http://www.example.com/upload").unwrap(); ++ easy.post(true).unwrap(); ++ easy.post_field_size(data.len() as u64).unwrap(); ++ ++ let mut transfer = easy.transfer(); ++ transfer.read_function(|buf| { ++ Ok(data.read(buf).unwrap_or(0)) ++ }).unwrap(); ++ transfer.perform().unwrap(); ++} ++``` ++ ++## Custom headers ++ ++Custom headers can be specified as part of the request: ++ ++```rust,no_run ++extern crate curl; ++ ++use curl::easy::{Easy, List}; ++ ++fn main() { ++ let mut easy = Easy::new(); ++ easy.url("http://www.example.com").unwrap(); ++ ++ let mut list = List::new(); ++ list.append("Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==").unwrap(); ++ easy.http_headers(list).unwrap(); ++ easy.perform().unwrap(); ++} ++``` ++ ++## Keep alive ++ ++The handle can be re-used across multiple requests. Curl will attempt to ++keep the connections alive. ++ ++```rust,no_run ++extern crate curl; ++ ++use curl::easy::Easy; ++ ++fn main() { ++ let mut handle = Easy::new(); ++ ++ handle.url("http://www.example.com/foo").unwrap(); ++ handle.perform().unwrap(); ++ ++ handle.url("http://www.example.com/bar").unwrap(); ++ handle.perform().unwrap(); ++} ++``` ++ ++## Multiple requests ++ ++The libcurl library provides support for sending multiple requests ++simultaneously through the "multi" interface. This is currently bound in the ++`multi` module of this crate and provides the ability to execute multiple ++transfers simultaneously. For more information, see that module. ++ ++## Version Support ++ ++The bindings have been developed using curl version 7.24.0. 
They should ++work with any newer version of curl and possibly with older versions, ++but this has not been tested. ++ ++## License ++ ++The `curl-rust` crate is licensed under the MIT license, see `LICENSE` for more ++details. diff --cc vendor/curl-0.4.14/appveyor.yml index 000000000,000000000..49f51274b new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/appveyor.yml @@@ -1,0 -1,0 +1,40 @@@ ++environment: ++ matrix: ++ ++ # Ensure vanilla builds work ++ - TARGET: i686-pc-windows-msvc ++ - TARGET: x86_64-pc-windows-msvc ++ ++ # Pin to specific VS versions to ensure the build works ++ - TARGET: x86_64-pc-windows-msvc ++ ARCH: amd64 ++ VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat ++ - TARGET: x86_64-pc-windows-msvc ++ ARCH: amd64 ++ VS: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat ++ ++install: ++ # Install rust, x86_64-pc-windows-msvc host ++ - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe ++ # use nightly if required until -Ctarget-feature=+crt-static is stable (expected in rust 1.19) ++ - if not defined RUSTFLAGS rustup-init.exe -y --default-host x86_64-pc-windows-msvc ++ - if defined RUSTFLAGS rustup-init.exe -y --default-host x86_64-pc-windows-msvc --default-toolchain nightly ++ - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin ++ ++ # Install the target we're compiling for ++ - if NOT "%TARGET%" == "x86_64-pc-windows-msvc" rustup target add %TARGET% ++ ++ # If we're pinning to a specific visual studio, do so now ++ - if defined VS call "%VS%" %ARCH% ++ ++ # let's see what we got ++ - where gcc rustc cargo ++ - rustc -vV ++ - cargo -vV ++ - set CARGO_TARGET_DIR=%CD%\target ++ ++build: false ++ ++test_script: ++ - cargo test --target %TARGET% ++ - cargo run --manifest-path systest/Cargo.toml --target %TARGET% diff --cc vendor/curl-0.4.14/ci/.cargo/config index 000000000,000000000..5ed633890 new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/ci/.cargo/config @@@ -1,0 -1,0 +1,2 @@@ ++[target.x86_64-pc-windows-gnu] ++linker = "x86_64-w64-mingw32-gcc" diff --cc vendor/curl-0.4.14/ci/Dockerfile-linux32 index 000000000,000000000..4d55dcf04 new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/ci/Dockerfile-linux32 @@@ -1,0 -1,0 +1,14 @@@ ++FROM ubuntu:16.04 ++ ++RUN dpkg --add-architecture i386 && \ ++ apt-get update && \ ++ apt-get install -y --no-install-recommends \ ++ gcc-multilib \ ++ ca-certificates \ ++ make \ ++ libc6-dev \ ++ libssl-dev:i386 \ ++ pkg-config ++ ++ENV PKG_CONFIG=i686-linux-gnu-pkg-config \ ++ PKG_CONFIG_ALLOW_CROSS=1 diff --cc vendor/curl-0.4.14/ci/Dockerfile-linux64 index 000000000,000000000..0acd1223a new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/ci/Dockerfile-linux64 @@@ -1,0 -1,0 +1,7 @@@ ++FROM ubuntu:16.04 ++ ++RUN apt-get update ++RUN apt-get install -y --no-install-recommends \ ++ gcc ca-certificates make libc6-dev \ ++ libssl-dev \ ++ pkg-config diff --cc vendor/curl-0.4.14/ci/Dockerfile-linux64-curl index 000000000,000000000..be03c24da new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/ci/Dockerfile-linux64-curl @@@ -1,0 -1,0 +1,6 @@@ ++FROM ubuntu:14.04 ++ ++RUN apt-get update ++RUN apt-get install -y --no-install-recommends \ ++ gcc ca-certificates make libc6-dev \ ++ libssl-dev libcurl4-openssl-dev pkg-config diff --cc vendor/curl-0.4.14/ci/Dockerfile-mingw index 000000000,000000000..ee5926c8d new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/ci/Dockerfile-mingw @@@ -1,0 -1,0 +1,6 @@@ ++FROM ubuntu:16.04 ++ ++RUN 
apt-get update
++RUN apt-get install -y --no-install-recommends \
++        gcc ca-certificates make libc6-dev \
++        gcc-mingw-w64-x86-64 libz-mingw-w64-dev
diff --cc vendor/curl-0.4.14/ci/Dockerfile-musl
index 000000000,000000000..47d211fdf
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/ci/Dockerfile-musl
@@@ -1,0 -1,0 +1,18 @@@
++FROM ubuntu:16.04
++
++RUN apt-get update
++RUN apt-get install -y --no-install-recommends \
++        gcc ca-certificates make libc6-dev curl \
++        musl-tools
++
++RUN \
++    curl https://www.openssl.org/source/old/1.0.2/openssl-1.0.2g.tar.gz | tar xzf - && \
++    cd openssl-1.0.2g && \
++    CC=musl-gcc ./Configure --prefix=/openssl no-dso linux-x86_64 -fPIC && \
++    make -j10 && \
++    make install && \
++    cd .. && \
++    rm -rf openssl-1.0.2g
++
++ENV OPENSSL_STATIC=1 \
++    OPENSSL_DIR=/openssl
diff --cc vendor/curl-0.4.14/ci/run.sh
index 000000000,000000000..e22c62da2
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/ci/run.sh
@@@ -1,0 -1,0 +1,11 @@@
++#!/bin/sh
++
++set -ex
++
++cargo test --target $TARGET --no-run
++if [ -z "$NO_RUN" ]; then
++    cargo test --target $TARGET
++    cargo run --manifest-path systest/Cargo.toml --target $TARGET
++    cargo doc --no-deps --target $TARGET
++    cargo doc --no-deps -p curl-sys --target $TARGET
++fi
diff --cc vendor/curl-0.4.14/src/easy/form.rs
index 000000000,000000000..be98d4e8e
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/easy/form.rs
@@@ -1,0 -1,0 +1,333 @@@
++use std::ffi::CString;
++use std::fmt;
++use std::path::Path;
++
++use FormError;
++use curl_sys;
++use easy::{list, List};
++
++/// Multipart/formdata for an HTTP POST request.
++///
++/// This structure is built up and then passed to the `Easy::httppost` method to
++/// be sent off with a request.
++pub struct Form {
++    head: *mut curl_sys::curl_httppost,
++    tail: *mut curl_sys::curl_httppost,
++    headers: Vec<List>,
++    buffers: Vec<Vec<u8>>,
++    strings: Vec<CString>,
++}
++
++/// One part in a multipart upload, added to a `Form`.
++pub struct Part<'form, 'data> {
++    form: &'form mut Form,
++    name: &'data str,
++    array: Vec<curl_sys::curl_forms>,
++    error: Option<FormError>,
++}
++
++pub fn raw(form: &Form) -> *mut curl_sys::curl_httppost {
++    form.head
++}
++
++impl Form {
++    /// Creates a new blank form ready for the addition of new data.
++    pub fn new() -> Form {
++        Form {
++            head: 0 as *mut _,
++            tail: 0 as *mut _,
++            headers: Vec::new(),
++            buffers: Vec::new(),
++            strings: Vec::new(),
++        }
++    }
++
++    /// Prepares adding a new part to this `Form`
++    ///
++    /// Note that the part is not actually added to the form until the `add`
++    /// method is called on `Part`, which may or may not fail.
++    pub fn part<'a, 'data>(&'a mut self, name: &'data str) -> Part<'a, 'data> {
++        Part {
++            error: None,
++            form: self,
++            name: name,
++            array: vec![curl_sys::curl_forms {
++                option: curl_sys::CURLFORM_END,
++                value: 0 as *mut _,
++            }],
++        }
++    }
++}
++
++impl fmt::Debug for Form {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        // TODO: fill this out more
++        f.debug_struct("Form")
++            .field("fields", &"...")
++            .finish()
++    }
++}
++
++impl Drop for Form {
++    fn drop(&mut self) {
++        unsafe {
++            curl_sys::curl_formfree(self.head);
++        }
++    }
++}
++
++impl<'form, 'data> Part<'form, 'data> {
++    /// A pointer to the contents of this part, the actual data to send away.
++    pub fn contents(&mut self, contents: &'data [u8]) -> &mut Self {
++        let pos = self.array.len() - 1;
++
++        // curl has an oddity where if the length is 0 it will call strlen
++        // on the value.
This means that if someone wants to add empty form
++        // contents we need to make sure the buffer contains a null byte.
++        let ptr = if contents.is_empty() {
++            b"\x00"
++        } else {
++            contents
++        }.as_ptr();
++
++        self.array.insert(pos, curl_sys::curl_forms {
++            option: curl_sys::CURLFORM_COPYCONTENTS,
++            value: ptr as *mut _,
++        });
++        self.array.insert(pos + 1, curl_sys::curl_forms {
++            option: curl_sys::CURLFORM_CONTENTSLENGTH,
++            value: contents.len() as *mut _,
++        });
++        self
++    }
++
++    /// Causes this file to be read and its contents used as data in this part
++    ///
++    /// This part does not automatically become a file upload part simply
++    /// because its data was read from a file.
++    ///
++    /// # Errors
++    ///
++    /// If the filename has any internal nul bytes or if on Windows it does not
++    /// contain a unicode filename then the `add` function will eventually
++    /// return an error.
++    pub fn file_content<P>(&mut self, file: P) -> &mut Self
++        where P: AsRef<Path>
++    {
++        self._file_content(file.as_ref())
++    }
++
++    fn _file_content(&mut self, file: &Path) -> &mut Self {
++        if let Some(bytes) = self.path2cstr(file) {
++            let pos = self.array.len() - 1;
++            self.array.insert(pos, curl_sys::curl_forms {
++                option: curl_sys::CURLFORM_FILECONTENT,
++                value: bytes.as_ptr() as *mut _,
++            });
++            self.form.strings.push(bytes);
++        }
++        self
++    }
++
++    /// Makes this part a file upload part of the given file.
++    ///
++    /// Sets the filename field to the basename of the provided file name, and
++    /// it reads the contents of the file and passes them as data and sets the
++    /// content type if the given file matches one of the internally known file
++    /// extensions.
++    ///
++    /// The given upload file must exist entirely on the filesystem before the
++    /// upload is started because libcurl needs to read the size of it
++    /// beforehand.
++    ///
++    /// Multiple files can be uploaded by calling this method multiple times and
++    /// content types can also be configured for each file (by calling that
++    /// next).
++    ///
++    /// # Errors
++    ///
++    /// If the filename has any internal nul bytes or if on Windows it does not
++    /// contain a unicode filename then this function will cause `add` to return
++    /// an error when called.
++    pub fn file<P: ?Sized>(&mut self, file: &'data P) -> &mut Self
++        where P: AsRef<Path>
++    {
++        self._file(file.as_ref())
++    }
++
++    fn _file(&mut self, file: &'data Path) -> &mut Self {
++        if let Some(bytes) = self.path2cstr(file) {
++            let pos = self.array.len() - 1;
++            self.array.insert(pos, curl_sys::curl_forms {
++                option: curl_sys::CURLFORM_FILE,
++                value: bytes.as_ptr() as *mut _,
++            });
++            self.form.strings.push(bytes);
++        }
++        self
++    }
++
++    /// Used in combination with `Part::file`, provides the content-type for
++    /// this part, possibly instead of choosing an internal one.
++    ///
++    /// # Panics
++    ///
++    /// This function will panic if `content_type` contains an internal nul
++    /// byte.
++    pub fn content_type(&mut self, content_type: &'data str) -> &mut Self {
++        if let Some(bytes) = self.bytes2cstr(content_type.as_bytes()) {
++            let pos = self.array.len() - 1;
++            self.array.insert(pos, curl_sys::curl_forms {
++                option: curl_sys::CURLFORM_CONTENTTYPE,
++                value: bytes.as_ptr() as *mut _,
++            });
++            self.form.strings.push(bytes);
++        }
++        self
++    }
++
++    /// Used in combination with `Part::file`, provides the filename for
++    /// this part instead of the actual one.
++    ///
++    /// # Errors
++    ///
++    /// If `name` contains an internal nul byte, or if on Windows the path is
++    /// not valid unicode then this function will return an error when `add` is
++    /// called.
++    pub fn filename<P: ?Sized>(&mut self, name: &'data P) -> &mut Self
++        where P: AsRef<Path>
++    {
++        self._filename(name.as_ref())
++    }
++
++    fn _filename(&mut self, name: &'data Path) -> &mut Self {
++        if let Some(bytes) = self.path2cstr(name) {
++            let pos = self.array.len() - 1;
++            self.array.insert(pos, curl_sys::curl_forms {
++                option: curl_sys::CURLFORM_FILENAME,
++                value: bytes.as_ptr() as *mut _,
++            });
++            self.form.strings.push(bytes);
++        }
++        self
++    }
++
++    /// This is used to provide a custom file upload part without using the
++    /// `file` method above.
++    ///
++    /// The first parameter is for the filename field and the second is the
++    /// in-memory contents.
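++    ///
++    /// # Examples
++    ///
++    /// A minimal usage sketch (the part and file names here are illustrative,
++    /// not from the upstream docs):
++    ///
++    /// ```
++    /// use curl::easy::Form;
++    ///
++    /// let mut form = Form::new();
++    /// form.part("upload")
++    ///     .buffer("file.txt", b"hello".to_vec())
++    ///     .add()
++    ///     .unwrap();
++    /// // `form` can now be handed to `Easy::httppost`.
++    /// ```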
++    ///
++    /// # Errors
++    ///
++    /// If `name` contains an internal nul byte, or if on Windows the path is
++    /// not valid unicode then this function will return an error when `add` is
++    /// called.
++    pub fn buffer<P: ?Sized>(&mut self, name: &'data P, data: Vec<u8>)
++                             -> &mut Self
++        where P: AsRef<Path>
++    {
++        self._buffer(name.as_ref(), data)
++    }
++
++    fn _buffer(&mut self, name: &'data Path, data: Vec<u8>) -> &mut Self {
++        if let Some(bytes) = self.path2cstr(name) {
++            let pos = self.array.len() - 1;
++            self.array.insert(pos, curl_sys::curl_forms {
++                option: curl_sys::CURLFORM_BUFFER,
++                value: bytes.as_ptr() as *mut _,
++            });
++            self.form.strings.push(bytes);
++            self.array.insert(pos + 1, curl_sys::curl_forms {
++                option: curl_sys::CURLFORM_BUFFERPTR,
++                value: data.as_ptr() as *mut _,
++            });
++            self.array.insert(pos + 2, curl_sys::curl_forms {
++                option: curl_sys::CURLFORM_BUFFERLENGTH,
++                value: data.len() as *mut _,
++            });
++            self.form.buffers.push(data);
++        }
++        self
++    }
++
++    /// Specifies extra headers for the form POST section.
++    ///
++    /// Appends the list of headers to those libcurl automatically generates.
++    pub fn content_header(&mut self, headers: List) -> &mut Self {
++        let pos = self.array.len() - 1;
++        self.array.insert(pos, curl_sys::curl_forms {
++            option: curl_sys::CURLFORM_CONTENTHEADER,
++            value: list::raw(&headers) as *mut _,
++        });
++        self.form.headers.push(headers);
++        self
++    }
++
++    /// Attempts to add this part to the `Form` that it was created from.
++    ///
++    /// If any error happens while adding, that error is returned, otherwise
++    /// `Ok(())` is returned.
++    pub fn add(&mut self) -> Result<(), FormError> {
++        if let Some(err) = self.error.clone() {
++            return Err(err)
++        }
++        let rc = unsafe {
++            curl_sys::curl_formadd(&mut self.form.head,
++                                   &mut self.form.tail,
++                                   curl_sys::CURLFORM_COPYNAME,
++                                   self.name.as_ptr(),
++                                   curl_sys::CURLFORM_NAMELENGTH,
++                                   self.name.len(),
++                                   curl_sys::CURLFORM_ARRAY,
++                                   self.array.as_ptr(),
++                                   curl_sys::CURLFORM_END)
++        };
++        if rc == curl_sys::CURL_FORMADD_OK {
++            Ok(())
++        } else {
++            Err(FormError::new(rc))
++        }
++    }
++
++    #[cfg(unix)]
++    fn path2cstr(&mut self, p: &Path) -> Option<CString> {
++        use std::os::unix::prelude::*;
++        self.bytes2cstr(p.as_os_str().as_bytes())
++    }
++
++    #[cfg(windows)]
++    fn path2cstr(&mut self, p: &Path) -> Option<CString> {
++        match p.to_str() {
++            Some(bytes) => self.bytes2cstr(bytes.as_bytes()),
++            None if self.error.is_none() => {
++                // TODO: better error code
++                self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE));
++                None
++            }
++            None => None,
++        }
++    }
++
++    fn bytes2cstr(&mut self, bytes: &[u8]) -> Option<CString> {
++        match CString::new(bytes) {
++            Ok(c) => Some(c),
++            Err(..) if self.error.is_none() => {
++                // TODO: better error code
++                self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE));
++                None
++            }
++            Err(..)
=> None,
++        }
++    }
++}
++
++impl<'form, 'data> fmt::Debug for Part<'form, 'data> {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        // TODO: fill this out more
++        f.debug_struct("Part")
++            .field("name", &self.name)
++            .field("form", &self.form)
++            .finish()
++    }
++}
diff --cc vendor/curl-0.4.14/src/easy/handle.rs
index 000000000,000000000..f7663da3e
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/easy/handle.rs
@@@ -1,0 -1,0 +1,1461 @@@
++use std::cell::Cell;
++use std::fmt;
++use std::io::SeekFrom;
++use std::path::Path;
++use std::ptr;
++use std::str;
++use std::time::Duration;
++
++use curl_sys;
++use libc::c_void;
++
++use Error;
++use easy::{Form, List};
++use easy::handler::{self, InfoType, SeekResult, ReadError, WriteError};
++use easy::handler::{TimeCondition, IpResolve, HttpVersion, SslVersion};
++use easy::handler::{SslOpt, NetRc, Auth, ProxyType};
++use easy::{Easy2, Handler};
++
++/// Raw bindings to a libcurl "easy session".
++///
++/// This type is the same as the `Easy2` type in this library except that it
++/// does not contain a type parameter. Callbacks from curl are all controlled
++/// via closures on this `Easy` type, and this type notably has a `transfer`
++/// method as well for ergonomic management of these callbacks.
++///
++/// There's not necessarily a right answer for which type is correct to use, but
++/// as a general rule of thumb `Easy` is typically a reasonable choice for
++/// synchronous I/O and `Easy2` is a good choice for asynchronous I/O.
++///
++/// ## Examples
++///
++/// Creating a handle which can be used later
++///
++/// ```
++/// use curl::easy::Easy;
++///
++/// let handle = Easy::new();
++/// ```
++///
++/// Send an HTTP request, writing the response to stdout.
++///
++/// ```
++/// use std::io::{stdout, Write};
++///
++/// use curl::easy::Easy;
++///
++/// let mut handle = Easy::new();
++/// handle.url("https://www.rust-lang.org/").unwrap();
++/// handle.write_function(|data| {
++///     stdout().write_all(data).unwrap();
++///     Ok(data.len())
++/// }).unwrap();
++/// handle.perform().unwrap();
++/// ```
++///
++/// Collect all output of an HTTP request to a vector.
++///
++/// ```
++/// use curl::easy::Easy;
++///
++/// let mut data = Vec::new();
++/// let mut handle = Easy::new();
++/// handle.url("https://www.rust-lang.org/").unwrap();
++/// {
++///     let mut transfer = handle.transfer();
++///     transfer.write_function(|new_data| {
++///         data.extend_from_slice(new_data);
++///         Ok(new_data.len())
++///     }).unwrap();
++///     transfer.perform().unwrap();
++/// }
++/// println!("{:?}", data);
++/// ```
++///
++/// More examples of various properties of an HTTP request can be found on the
++/// specific methods as well.
++#[derive(Debug)]
++pub struct Easy {
++    inner: Easy2<EasyData>,
++}
++
++/// A scoped transfer of information which borrows an `Easy` and allows
++/// referencing stack-local data of the lifetime `'data`.
++///
++/// Usage of `Easy` requires the `'static` and `Send` bounds on all callbacks
++/// registered, but that's not often wanted if all you need is to collect a
++/// bunch of data in memory to a vector, for example. The `Transfer` structure,
++/// created by the `Easy::transfer` method, is used for this sort of request.
++///
++/// The callbacks attached to a `Transfer` are only active for that one transfer
++/// object, and they allow you to elide both the `Send` and `'static` bounds to
++/// close over stack-local information.
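++///
++/// # Examples
++///
++/// A short sketch (mirroring the vector-collecting example above) of how a
++/// `Transfer` lets a callback borrow stack-local state:
++///
++/// ```
++/// use curl::easy::Easy;
++///
++/// let mut buf = Vec::new();
++/// let mut handle = Easy::new();
++/// handle.url("https://www.rust-lang.org/").unwrap();
++/// {
++///     let mut transfer = handle.transfer();
++///     transfer.write_function(|data| {
++///         buf.extend_from_slice(data);
++///         Ok(data.len())
++///     }).unwrap();
++///     transfer.perform().unwrap();
++/// }
++/// println!("read {} bytes", buf.len());
++/// ```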
++pub struct Transfer<'easy, 'data> {
++    easy: &'easy mut Easy,
++    data: Box<Callbacks<'data>>,
++}
++
++pub struct EasyData {
++    running: Cell<bool>,
++    owned: Callbacks<'static>,
++    borrowed: Cell<*mut Callbacks<'static>>,
++}
++
++unsafe impl Send for EasyData {}
++
++#[derive(Default)]
++struct Callbacks<'a> {
++    write: Option<Box<FnMut(&[u8]) -> Result<usize, WriteError> + 'a>>,
++    read: Option<Box<FnMut(&mut [u8]) -> Result<usize, ReadError> + 'a>>,
++    seek: Option<Box<FnMut(SeekFrom) -> SeekResult + 'a>>,
++    debug: Option<Box<FnMut(InfoType, &[u8]) + 'a>>,
++    header: Option<Box<FnMut(&[u8]) -> bool + 'a>>,
++    progress: Option<Box<FnMut(f64, f64, f64, f64) -> bool + 'a>>,
++    ssl_ctx: Option<Box<FnMut(*mut c_void) -> Result<(), Error> + 'a>>,
++}
++
++impl Easy {
++    /// Creates a new "easy" handle which is the core of almost all operations
++    /// in libcurl.
++    ///
++    /// To use a handle, applications typically configure a number of options
++    /// followed by a call to `perform`. Options are preserved across calls to
++    /// `perform` and need to be reset manually (or via the `reset` method) if
++    /// this is not desired.
++    pub fn new() -> Easy {
++        Easy {
++            inner: Easy2::new(EasyData {
++                running: Cell::new(false),
++                owned: Callbacks::default(),
++                borrowed: Cell::new(ptr::null_mut()),
++            }),
++        }
++    }
++
++    // =========================================================================
++    // Behavior options
++
++    /// Same as [`Easy2::verbose`](struct.Easy2.html#method.verbose)
++    pub fn verbose(&mut self, verbose: bool) -> Result<(), Error> {
++        self.inner.verbose(verbose)
++    }
++
++    /// Same as [`Easy2::show_header`](struct.Easy2.html#method.show_header)
++    pub fn show_header(&mut self, show: bool) -> Result<(), Error> {
++        self.inner.show_header(show)
++    }
++
++    /// Same as [`Easy2::progress`](struct.Easy2.html#method.progress)
++    pub fn progress(&mut self, progress: bool) -> Result<(), Error> {
++        self.inner.progress(progress)
++    }
++
++    /// Same as [`Easy2::signal`](struct.Easy2.html#method.signal)
++    pub fn signal(&mut self, signal: bool) -> Result<(), Error> {
++        self.inner.signal(signal)
++    }
++
++    /// Same as [`Easy2::wildcard_match`](struct.Easy2.html#method.wildcard_match)
++    pub fn wildcard_match(&mut self, m: bool) -> Result<(), Error> {
++        self.inner.wildcard_match(m)
++    }
++
++    /// Same as [`Easy2::unix_socket`](struct.Easy2.html#method.unix_socket)
++    pub fn unix_socket(&mut self, unix_domain_socket: &str) -> Result<(), Error> {
++        self.inner.unix_socket(unix_domain_socket)
++    }
++
++    // =========================================================================
++    // Callback options
++
++    /// Set callback for writing received data.
++    ///
++    /// This callback function gets called by libcurl as soon as there is data
++    /// received that needs to be saved.
++    ///
++    /// The callback function will be passed as much data as possible on each
++    /// invocation, but you must not make any assumptions. It may be one byte, it
++    /// may be thousands. If `show_header` is enabled, which makes header data
++    /// get passed to the write callback, you can get up to
++    /// `CURL_MAX_HTTP_HEADER` bytes of header data passed into it. This
++    /// usually means 100K.
++    ///
++    /// This function may be called with zero bytes of data if the transferred
++    /// file is empty.
++    ///
++    /// The callback should return the number of bytes actually taken care of.
++    /// If that amount differs from the amount passed to your callback function,
++    /// it'll signal an error condition to the library. This will cause the
++    /// transfer to get aborted and the libcurl function used will return
++    /// an error with `is_write_error`.
++    ///
++    /// If your callback function returns `Err(WriteError::Pause)` it will cause
++    /// this transfer to become paused. See `unpause_write` for further details.
++    ///
++    /// By default data is sent into the void, and this corresponds to the
++    /// `CURLOPT_WRITEFUNCTION` and `CURLOPT_WRITEDATA` options.
++    ///
++    /// Note that the lifetime bound on this function is `'static`, but that
++    /// is often too restrictive. To use stack data consider calling the
++    /// `transfer` method and then using `write_function` to configure a
++    /// callback that can reference stack-local data.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// use std::io::{stdout, Write};
++    /// use curl::easy::Easy;
++    ///
++    /// let mut handle = Easy::new();
++    /// handle.url("https://www.rust-lang.org/").unwrap();
++    /// handle.write_function(|data| {
++    ///     Ok(stdout().write(data).unwrap())
++    /// }).unwrap();
++    /// handle.perform().unwrap();
++    /// ```
++    ///
++    /// Writing to a stack-local buffer
++    ///
++    /// ```
++    /// use std::io::{stdout, Write};
++    /// use curl::easy::Easy;
++    ///
++    /// let mut buf = Vec::new();
++    /// let mut handle = Easy::new();
++    /// handle.url("https://www.rust-lang.org/").unwrap();
++    ///
++    /// let mut transfer = handle.transfer();
++    /// transfer.write_function(|data| {
++    ///     buf.extend_from_slice(data);
++    ///     Ok(data.len())
++    /// }).unwrap();
++    /// transfer.perform().unwrap();
++    /// ```
++    pub fn write_function<F>(&mut self, f: F) -> Result<(), Error>
++        where F: FnMut(&[u8]) -> Result<usize, WriteError> + Send + 'static
++    {
++        self.inner.get_mut().owned.write = Some(Box::new(f));
++        Ok(())
++    }
++
++    /// Read callback for data uploads.
++    ///
++    /// This callback function gets called by libcurl as soon as it needs to
++    /// read data in order to send it to the peer - like if you ask it to upload
++    /// or post data to the server.
++    ///
++    /// Your function must then return the actual number of bytes that it stored
++    /// in that memory area. Returning 0 will signal end-of-file to the library
++    /// and cause it to stop the current transfer.
++    ///
++    /// If you stop the current transfer by returning 0 "prematurely" (i.e.
++    /// before the server expected it, like when you've said you will upload N
++    /// bytes and you upload less than N bytes), you may experience that the
++    /// server "hangs" waiting for the rest of the data that won't come.
++    ///
++    /// The read callback may return `Err(ReadError::Abort)` to stop the
++    /// current operation immediately, resulting in an `is_aborted_by_callback`
++    /// error code from the transfer.
++    ///
++    /// The callback can return `Err(ReadError::Pause)` to cause reading from
++    /// this connection to pause. See `unpause_read` for further details.
++    ///
++    /// By default no data is read, and this corresponds to the
++    /// `CURLOPT_READFUNCTION` and `CURLOPT_READDATA` options.
++    ///
++    /// Note that the lifetime bound on this function is `'static`, but that
++    /// is often too restrictive. To use stack data consider calling the
++    /// `transfer` method and then using `read_function` to configure a
++    /// callback that can reference stack-local data.
++    ///
++    /// # Examples
++    ///
++    /// Read input from stdin
++    ///
++    /// ```no_run
++    /// use std::io::{stdin, Read};
++    /// use curl::easy::Easy;
++    ///
++    /// let mut handle = Easy::new();
++    /// handle.url("https://example.com/login").unwrap();
++    /// handle.read_function(|into| {
++    ///     Ok(stdin().read(into).unwrap())
++    /// }).unwrap();
++    /// handle.post(true).unwrap();
++    /// handle.perform().unwrap();
++    /// ```
++    ///
++    /// Reading from stack-local data:
++    ///
++    /// ```no_run
++    /// use std::io::{stdin, Read};
++    /// use curl::easy::Easy;
++    ///
++    /// let mut data_to_upload = &b"foobar"[..];
++    /// let mut handle = Easy::new();
++    /// handle.url("https://example.com/login").unwrap();
++    /// handle.post(true).unwrap();
++    ///
++    /// let mut transfer = handle.transfer();
++    /// transfer.read_function(|into| {
++    ///     Ok(data_to_upload.read(into).unwrap())
++    /// }).unwrap();
++    /// transfer.perform().unwrap();
++    /// ```
++    pub fn read_function<F>(&mut self, f: F) -> Result<(), Error>
++        where F: FnMut(&mut [u8]) -> Result<usize, ReadError> + Send + 'static
++    {
++        self.inner.get_mut().owned.read = Some(Box::new(f));
++        Ok(())
++    }
++
++    /// User callback for seeking in input stream.
++    ///
++    /// This function gets called by libcurl to seek to a certain position in
++    /// the input stream and can be used to fast forward a file in a resumed
++    /// upload (instead of reading all uploaded bytes with the normal read
++    /// function/callback). It is also called to rewind a stream when data has
++    /// already been sent to the server and needs to be sent again. This may
++    /// happen when doing a HTTP PUT or POST with a multi-pass authentication
++    /// method, or when an existing HTTP connection is reused too late and the
++    /// server closes the connection.
++    ///
++    /// The callback function must return `SeekResult::Ok` on success,
++    /// `SeekResult::Fail` to cause the upload operation to fail or
++    /// `SeekResult::CantSeek` to indicate that while the seek failed, libcurl
++    /// is free to work around the problem if possible. The latter can sometimes
++    /// be done by instead reading from the input or similar.
++    ///
++    /// By default this option is not set, and this corresponds to the
++    /// `CURLOPT_SEEKFUNCTION` and `CURLOPT_SEEKDATA` options.
++    ///
++    /// Note that the lifetime bound on this function is `'static`, but that
++    /// is often too restrictive. To use stack data consider calling the
++    /// `transfer` method and then using `seek_function` to configure a
++    /// callback that can reference stack-local data.
++    pub fn seek_function<F>(&mut self, f: F) -> Result<(), Error>
++        where F: FnMut(SeekFrom) -> SeekResult + Send + 'static
++    {
++        self.inner.get_mut().owned.seek = Some(Box::new(f));
++        Ok(())
++    }
++
++    /// Callback to progress meter function
++    ///
++    /// This function gets called by libcurl instead of its internal equivalent
++    /// with a frequent interval. While data is being transferred it will be
++    /// called very frequently, and during slow periods like when nothing is
++    /// being transferred it can slow down to about one call per second.
++    ///
++    /// The callback gets told how much data libcurl will transfer and has
++    /// transferred, in number of bytes. The first argument is the total number
++    /// of bytes libcurl expects to download in this transfer. The second
++    /// argument is the number of bytes downloaded so far. The third argument is
++    /// the total number of bytes libcurl expects to upload in this transfer.
++    /// The fourth argument is the number of bytes uploaded so far.
++    ///
++    /// Unknown/unused argument values passed to the callback will be set to
++    /// zero (like if you only download data, the upload size will remain 0).
++    /// The callback may be called one or more times before it knows the data
++    /// sizes, so a program must be written to handle that.
++    ///
++    /// Returning `false` from this callback will cause libcurl to abort the
++    /// transfer and return `is_aborted_by_callback`.
++    ///
++    /// If you transfer data with the multi interface, this function will not be
++    /// called during periods of idleness unless you call the appropriate
++    /// libcurl function that performs transfers.
++    ///
++    /// `progress` must be set to `true` to make this function actually get
++    /// called.
++    ///
++    /// By default this function calls an internal method and corresponds to
++    /// `CURLOPT_PROGRESSFUNCTION` and `CURLOPT_PROGRESSDATA`.
++    ///
++    /// Note that the lifetime bound on this function is `'static`, but that
++    /// is often too restrictive. To use stack data consider calling the
++    /// `transfer` method and then using `progress_function` to configure a
++    /// callback that can reference stack-local data.
++    pub fn progress_function<F>(&mut self, f: F) -> Result<(), Error>
++        where F: FnMut(f64, f64, f64, f64) -> bool + Send + 'static
++    {
++        self.inner.get_mut().owned.progress = Some(Box::new(f));
++        Ok(())
++    }
++
++    /// Callback to SSL context
++    ///
++    /// This callback function gets called by libcurl just before the
++    /// initialization of an SSL connection after having processed all
++    /// other SSL related options to give a last chance to an
++    /// application to modify the behaviour of the SSL
++    /// initialization. The `ssl_ctx` parameter is actually a pointer
++    /// to the SSL library's SSL_CTX. If an error is returned from the
++    /// callback no attempt to establish a connection is made and the
++    /// perform operation will return the callback's error code.
++    ///
++    /// This function will get called on all new connections made to a
++    /// server, during the SSL negotiation. The SSL_CTX pointer will
++    /// be a new one every time.
++    ///
++    /// To use this properly, a non-trivial amount of knowledge of
++    /// your SSL library is necessary. For example, you can use this
++    /// function to call library-specific callbacks to add additional
++    /// validation code for certificates, and even to change the
++    /// actual URI of an HTTPS request.
++    ///
++    /// By default this function calls an internal method and
++    /// corresponds to `CURLOPT_SSL_CTX_FUNCTION` and
++    /// `CURLOPT_SSL_CTX_DATA`.
++    ///
++    /// Note that the lifetime bound on this function is `'static`, but that
++    /// is often too restrictive. To use stack data consider calling the
++    /// `transfer` method and then using `ssl_ctx_function` to configure a
++    /// callback that can reference stack-local data.
++    pub fn ssl_ctx_function<F>(&mut self, f: F) -> Result<(), Error>
++        where F: FnMut(*mut c_void) -> Result<(), Error> + Send + 'static
++    {
++        self.inner.get_mut().owned.ssl_ctx = Some(Box::new(f));
++        Ok(())
++    }
++
++    /// Specify a debug callback
++    ///
++    /// `debug_function` replaces the standard debug function used when
++    /// `verbose` is in effect. This callback receives debug information,
++    /// as specified in the type argument.
++    ///
++    /// By default this option is not set and corresponds to the
++    /// `CURLOPT_DEBUGFUNCTION` and `CURLOPT_DEBUGDATA` options.
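++    ///
++    /// # Examples
++    ///
++    /// A sketch (not from the upstream docs) that echoes libcurl's
++    /// informational messages; `verbose` must be enabled for the callback to
++    /// ever fire:
++    ///
++    /// ```no_run
++    /// use curl::easy::{Easy, InfoType};
++    ///
++    /// let mut handle = Easy::new();
++    /// handle.verbose(true).unwrap();
++    /// handle.debug_function(|info, data| {
++    ///     // `data` is not guaranteed to be UTF-8, so decode lossily.
++    ///     if let InfoType::Text = info {
++    ///         print!("curl: {}", String::from_utf8_lossy(data));
++    ///     }
++    /// }).unwrap();
++    /// handle.url("https://www.rust-lang.org/").unwrap();
++    /// handle.perform().unwrap();
++    /// ```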
++    ///
++    /// Note that the lifetime bound on this function is `'static`, but that
++    /// is often too restrictive. To use stack data consider calling the
++    /// `transfer` method and then using `debug_function` to configure a
++    /// callback that can reference stack-local data.
++    pub fn debug_function<F>(&mut self, f: F) -> Result<(), Error>
++        where F: FnMut(InfoType, &[u8]) + Send + 'static
++    {
++        self.inner.get_mut().owned.debug = Some(Box::new(f));
++        Ok(())
++    }
++
++    /// Callback that receives header data
++    ///
++    /// This function gets called by libcurl as soon as it has received header
++    /// data. The header callback will be called once for each header and only
++    /// complete header lines are passed on to the callback. Parsing headers is
++    /// very easy using this. If this callback returns `false` it'll signal an
++    /// error to the library. This will cause the transfer to get aborted and
++    /// the libcurl function in progress will return `is_write_error`.
++    ///
++    /// A complete HTTP header that is passed to this function can be up to
++    /// CURL_MAX_HTTP_HEADER (100K) bytes.
++    ///
++    /// It's important to note that the callback will be invoked for the headers
++    /// of all responses received after initiating a request and not just the
++    /// final response. This includes all responses which occur during
++    /// authentication negotiation. If you need to operate on only the headers
++    /// from the final response, you will need to collect headers in the
++    /// callback yourself and use HTTP status lines, for example, to delimit
++    /// response boundaries.
++    ///
++    /// When a server sends a chunked encoded transfer, it may contain a
++    /// trailer. That trailer is identical to a HTTP header and if such a
++    /// trailer is received it is passed to the application using this callback
++    /// as well. There are several ways to detect it being a trailer and not an
++    /// ordinary header: 1) it comes after the response-body. 2) it comes after
++    /// the final header line (CR LF) 3) a Trailer: header among the regular
++    /// response-headers mentions what header(s) to expect in the trailer.
++    ///
++    /// For non-HTTP protocols like FTP, POP3, IMAP and SMTP this function will
++    /// get called with the server responses to the commands that libcurl sends.
++    ///
++    /// By default this option is not set and corresponds to the
++    /// `CURLOPT_HEADERFUNCTION` and `CURLOPT_HEADERDATA` options.
++    ///
++    /// Note that the lifetime bound on this function is `'static`, but that
++    /// is often too restrictive. To use stack data consider calling the
++    /// `transfer` method and then using `header_function` to configure a
++    /// callback that can reference stack-local data.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// use std::str;
++    ///
++    /// use curl::easy::Easy;
++    ///
++    /// let mut handle = Easy::new();
++    /// handle.url("https://www.rust-lang.org/").unwrap();
++    /// handle.header_function(|header| {
++    ///     print!("header: {}", str::from_utf8(header).unwrap());
++    ///     true
++    /// }).unwrap();
++    /// handle.perform().unwrap();
++    /// ```
++    ///
++    /// Collecting headers to a stack local vector
++    ///
++    /// ```
++    /// use std::str;
++    ///
++    /// use curl::easy::Easy;
++    ///
++    /// let mut headers = Vec::new();
++    /// let mut handle = Easy::new();
++    /// handle.url("https://www.rust-lang.org/").unwrap();
++    ///
++    /// {
++    ///     let mut transfer = handle.transfer();
++    ///     transfer.header_function(|header| {
++    ///         headers.push(str::from_utf8(header).unwrap().to_string());
++    ///         true
++    ///     }).unwrap();
++    ///     transfer.perform().unwrap();
++    /// }
++    ///
++    /// println!("{:?}", headers);
++    /// ```
++    pub fn header_function<F>(&mut self, f: F) -> Result<(), Error>
++        where F: FnMut(&[u8]) -> bool + Send + 'static
++    {
++        self.inner.get_mut().owned.header = Some(Box::new(f));
++        Ok(())
++    }
++
++    // =========================================================================
++    // Error options
++
++    // TODO: error buffer and stderr
++
++    /// Same as [`Easy2::fail_on_error`](struct.Easy2.html#method.fail_on_error)
++    pub fn fail_on_error(&mut self, fail: bool) -> Result<(), Error> {
++        self.inner.fail_on_error(fail)
++    }
++
++    // =========================================================================
++    // Network options
++
++    /// Same as [`Easy2::url`](struct.Easy2.html#method.url)
++    pub fn url(&mut self, url: &str) -> Result<(), Error> {
++        self.inner.url(url)
++    }
++
++    /// Same as [`Easy2::port`](struct.Easy2.html#method.port)
++    pub fn port(&mut self, port: u16) -> Result<(), Error> {
++        self.inner.port(port)
++    }
++
++    /// Same as [`Easy2::proxy`](struct.Easy2.html#method.proxy)
++    pub fn proxy(&mut self, url: &str) -> Result<(), Error> {
++        self.inner.proxy(url)
++    }
++
++    /// Same as [`Easy2::proxy_port`](struct.Easy2.html#method.proxy_port)
++    pub fn proxy_port(&mut self, port: u16) -> Result<(), Error> {
++        self.inner.proxy_port(port)
++    }
++
++    /// Same as [`Easy2::proxy_type`](struct.Easy2.html#method.proxy_type)
++    pub fn proxy_type(&mut self, kind: ProxyType) -> Result<(), Error> {
++        self.inner.proxy_type(kind)
++    }
++
++    /// Same as [`Easy2::noproxy`](struct.Easy2.html#method.noproxy)
++    pub fn noproxy(&mut self, skip: &str) -> Result<(), Error> {
++        self.inner.noproxy(skip)
++    }
++
++    /// Same as [`Easy2::http_proxy_tunnel`](struct.Easy2.html#method.http_proxy_tunnel)
++    pub fn http_proxy_tunnel(&mut self, tunnel: bool) -> Result<(), Error> {
++        self.inner.http_proxy_tunnel(tunnel)
++    }
++
++    /// Same as [`Easy2::interface`](struct.Easy2.html#method.interface)
++    pub fn interface(&mut self, interface: &str) -> Result<(), Error> {
++        self.inner.interface(interface)
++    }
++
++    /// Same as [`Easy2::set_local_port`](struct.Easy2.html#method.set_local_port)
++    pub fn set_local_port(&mut self, port: u16) -> Result<(), Error> {
++        self.inner.set_local_port(port)
++    }
++
++    /// Same as [`Easy2::local_port_range`](struct.Easy2.html#method.local_port_range)
++    pub fn local_port_range(&mut self, range: u16) -> Result<(), Error> {
++        self.inner.local_port_range(range)
++    }
++
++    /// Same as [`Easy2::dns_cache_timeout`](struct.Easy2.html#method.dns_cache_timeout)
++    pub fn dns_cache_timeout(&mut self, dur: Duration) -> Result<(), Error> {
++
self.inner.dns_cache_timeout(dur)
++    }
++
++    /// Same as [`Easy2::buffer_size`](struct.Easy2.html#method.buffer_size)
++    pub fn buffer_size(&mut self, size: usize) -> Result<(), Error> {
++        self.inner.buffer_size(size)
++    }
++
++    /// Same as [`Easy2::tcp_nodelay`](struct.Easy2.html#method.tcp_nodelay)
++    pub fn tcp_nodelay(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.tcp_nodelay(enable)
++    }
++
++    /// Same as [`Easy2::tcp_keepalive`](struct.Easy2.html#method.tcp_keepalive)
++    pub fn tcp_keepalive(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.tcp_keepalive(enable)
++    }
++
++    /// Same as [`Easy2::tcp_keepintvl`](struct.Easy2.html#method.tcp_keepintvl)
++    pub fn tcp_keepintvl(&mut self, dur: Duration) -> Result<(), Error> {
++        self.inner.tcp_keepintvl(dur)
++    }
++
++    /// Same as [`Easy2::tcp_keepidle`](struct.Easy2.html#method.tcp_keepidle)
++    pub fn tcp_keepidle(&mut self, dur: Duration) -> Result<(), Error> {
++        self.inner.tcp_keepidle(dur)
++    }
++
++    /// Same as [`Easy2::address_scope`](struct.Easy2.html#method.address_scope)
++    pub fn address_scope(&mut self, scope: u32) -> Result<(), Error> {
++        self.inner.address_scope(scope)
++    }
++
++    // =========================================================================
++    // Names and passwords
++
++    /// Same as [`Easy2::username`](struct.Easy2.html#method.username)
++    pub fn username(&mut self, user: &str) -> Result<(), Error> {
++        self.inner.username(user)
++    }
++
++    /// Same as [`Easy2::password`](struct.Easy2.html#method.password)
++    pub fn password(&mut self, pass: &str) -> Result<(), Error> {
++        self.inner.password(pass)
++    }
++
++    /// Same as [`Easy2::http_auth`](struct.Easy2.html#method.http_auth)
++    pub fn http_auth(&mut self, auth: &Auth) -> Result<(), Error> {
++        self.inner.http_auth(auth)
++    }
++
++    /// Same as [`Easy2::proxy_username`](struct.Easy2.html#method.proxy_username)
++    pub fn proxy_username(&mut self, user: &str) -> Result<(), Error> {
++        self.inner.proxy_username(user)
++    }
++
++    /// Same as [`Easy2::proxy_password`](struct.Easy2.html#method.proxy_password)
++    pub fn proxy_password(&mut self, pass: &str) -> Result<(), Error> {
++        self.inner.proxy_password(pass)
++    }
++
++    /// Same as [`Easy2::proxy_auth`](struct.Easy2.html#method.proxy_auth)
++    pub fn proxy_auth(&mut self, auth: &Auth) -> Result<(), Error> {
++        self.inner.proxy_auth(auth)
++    }
++
++    /// Same as [`Easy2::netrc`](struct.Easy2.html#method.netrc)
++    pub fn netrc(&mut self, netrc: NetRc) -> Result<(), Error> {
++        self.inner.netrc(netrc)
++    }
++
++    // =========================================================================
++    // HTTP Options
++
++    /// Same as [`Easy2::autoreferer`](struct.Easy2.html#method.autoreferer)
++    pub fn autoreferer(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.autoreferer(enable)
++    }
++
++    /// Same as [`Easy2::accept_encoding`](struct.Easy2.html#method.accept_encoding)
++    pub fn accept_encoding(&mut self, encoding: &str) -> Result<(), Error> {
++        self.inner.accept_encoding(encoding)
++    }
++
++    /// Same as [`Easy2::transfer_encoding`](struct.Easy2.html#method.transfer_encoding)
++    pub fn transfer_encoding(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.transfer_encoding(enable)
++    }
++
++    /// Same as [`Easy2::follow_location`](struct.Easy2.html#method.follow_location)
++    pub fn follow_location(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.follow_location(enable)
++    }
++
++    /// Same as
[`Easy2::unrestricted_auth`](struct.Easy2.html#method.unrestricted_auth)
++    pub fn unrestricted_auth(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.unrestricted_auth(enable)
++    }
++
++    /// Same as [`Easy2::max_redirections`](struct.Easy2.html#method.max_redirections)
++    pub fn max_redirections(&mut self, max: u32) -> Result<(), Error> {
++        self.inner.max_redirections(max)
++    }
++
++    /// Same as [`Easy2::put`](struct.Easy2.html#method.put)
++    pub fn put(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.put(enable)
++    }
++
++    /// Same as [`Easy2::post`](struct.Easy2.html#method.post)
++    pub fn post(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.post(enable)
++    }
++
++    /// Same as [`Easy2::post_fields_copy`](struct.Easy2.html#method.post_fields_copy)
++    pub fn post_fields_copy(&mut self, data: &[u8]) -> Result<(), Error> {
++        self.inner.post_fields_copy(data)
++    }
++
++    /// Same as [`Easy2::post_field_size`](struct.Easy2.html#method.post_field_size)
++    pub fn post_field_size(&mut self, size: u64) -> Result<(), Error> {
++        self.inner.post_field_size(size)
++    }
++
++    /// Same as [`Easy2::httppost`](struct.Easy2.html#method.httppost)
++    pub fn httppost(&mut self, form: Form) -> Result<(), Error> {
++        self.inner.httppost(form)
++    }
++
++    /// Same as [`Easy2::referer`](struct.Easy2.html#method.referer)
++    pub fn referer(&mut self, referer: &str) -> Result<(), Error> {
++        self.inner.referer(referer)
++    }
++
++    /// Same as [`Easy2::useragent`](struct.Easy2.html#method.useragent)
++    pub fn useragent(&mut self, useragent: &str) -> Result<(), Error> {
++        self.inner.useragent(useragent)
++    }
++
++    /// Same as [`Easy2::http_headers`](struct.Easy2.html#method.http_headers)
++    pub fn http_headers(&mut self, list: List) -> Result<(), Error> {
++        self.inner.http_headers(list)
++    }
++
++    /// Same as [`Easy2::cookie`](struct.Easy2.html#method.cookie)
++    pub fn cookie(&mut self, cookie: &str) -> Result<(), Error> {
++        self.inner.cookie(cookie)
++    }
++
++    /// Same as [`Easy2::cookie_file`](struct.Easy2.html#method.cookie_file)
++    pub fn cookie_file<P: AsRef<Path>>(&mut self, file: P) -> Result<(), Error> {
++        self.inner.cookie_file(file)
++    }
++
++    /// Same as [`Easy2::cookie_jar`](struct.Easy2.html#method.cookie_jar)
++    pub fn cookie_jar<P: AsRef<Path>>(&mut self, file: P) -> Result<(), Error> {
++        self.inner.cookie_jar(file)
++    }
++
++    /// Same as [`Easy2::cookie_session`](struct.Easy2.html#method.cookie_session)
++    pub fn cookie_session(&mut self, session: bool) -> Result<(), Error> {
++        self.inner.cookie_session(session)
++    }
++
++    /// Same as [`Easy2::cookie_list`](struct.Easy2.html#method.cookie_list)
++    pub fn cookie_list(&mut self, cookie: &str) -> Result<(), Error> {
++        self.inner.cookie_list(cookie)
++    }
++
++    /// Same as [`Easy2::get`](struct.Easy2.html#method.get)
++    pub fn get(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.get(enable)
++    }
++
++    /// Same as [`Easy2::ignore_content_length`](struct.Easy2.html#method.ignore_content_length)
++    pub fn ignore_content_length(&mut self, ignore: bool) -> Result<(), Error> {
++        self.inner.ignore_content_length(ignore)
++    }
++
++    /// Same as [`Easy2::http_content_decoding`](struct.Easy2.html#method.http_content_decoding)
++    pub fn http_content_decoding(&mut self, enable: bool) -> Result<(), Error> {
++        self.inner.http_content_decoding(enable)
++    }
++
++    /// Same as [`Easy2::http_transfer_decoding`](struct.Easy2.html#method.http_transfer_decoding)
++    pub fn http_transfer_decoding(&mut self, enable: bool) -> Result<(), Error> {
++
self.inner.http_transfer_decoding(enable) ++ } ++ ++ // ========================================================================= ++ // Protocol Options ++ ++ /// Same as [`Easy2::range`](struct.Easy2.html#method.range) ++ pub fn range(&mut self, range: &str) -> Result<(), Error> { ++ self.inner.range(range) ++ } ++ ++ /// Same as [`Easy2::resume_from`](struct.Easy2.html#method.resume_from) ++ pub fn resume_from(&mut self, from: u64) -> Result<(), Error> { ++ self.inner.resume_from(from) ++ } ++ ++ /// Same as [`Easy2::custom_request`](struct.Easy2.html#method.custom_request) ++ pub fn custom_request(&mut self, request: &str) -> Result<(), Error> { ++ self.inner.custom_request(request) ++ } ++ ++ /// Same as [`Easy2::fetch_filetime`](struct.Easy2.html#method.fetch_filetime) ++ pub fn fetch_filetime(&mut self, fetch: bool) -> Result<(), Error> { ++ self.inner.fetch_filetime(fetch) ++ } ++ ++ /// Same as [`Easy2::nobody`](struct.Easy2.html#method.nobody) ++ pub fn nobody(&mut self, enable: bool) -> Result<(), Error> { ++ self.inner.nobody(enable) ++ } ++ ++ /// Same as [`Easy2::in_filesize`](struct.Easy2.html#method.in_filesize) ++ pub fn in_filesize(&mut self, size: u64) -> Result<(), Error> { ++ self.inner.in_filesize(size) ++ } ++ ++ /// Same as [`Easy2::upload`](struct.Easy2.html#method.upload) ++ pub fn upload(&mut self, enable: bool) -> Result<(), Error> { ++ self.inner.upload(enable) ++ } ++ ++ /// Same as [`Easy2::max_filesize`](struct.Easy2.html#method.max_filesize) ++ pub fn max_filesize(&mut self, size: u64) -> Result<(), Error> { ++ self.inner.max_filesize(size) ++ } ++ ++ /// Same as [`Easy2::time_condition`](struct.Easy2.html#method.time_condition) ++ pub fn time_condition(&mut self, cond: TimeCondition) -> Result<(), Error> { ++ self.inner.time_condition(cond) ++ } ++ ++ /// Same as [`Easy2::time_value`](struct.Easy2.html#method.time_value) ++ pub fn time_value(&mut self, val: i64) -> Result<(), Error> { ++ self.inner.time_value(val) ++ } ++ ++ // ========================================================================= ++ // Connection Options ++ ++ /// Same as [`Easy2::timeout`](struct.Easy2.html#method.timeout) ++ pub fn timeout(&mut self, timeout: Duration) -> Result<(), Error> { ++ self.inner.timeout(timeout) ++ } ++ ++ /// Same as [`Easy2::low_speed_limit`](struct.Easy2.html#method.low_speed_limit) ++ pub fn low_speed_limit(&mut self, limit: u32) -> Result<(), Error> { ++ self.inner.low_speed_limit(limit) ++ } ++ ++ /// Same as [`Easy2::low_speed_time`](struct.Easy2.html#method.low_speed_time) ++ pub fn low_speed_time(&mut self, dur: Duration) -> Result<(), Error> { ++ self.inner.low_speed_time(dur) ++ } ++ ++ /// Same as [`Easy2::max_send_speed`](struct.Easy2.html#method.max_send_speed) ++ pub fn max_send_speed(&mut self, speed: u64) -> Result<(), Error> { ++ self.inner.max_send_speed(speed) ++ } ++ ++ /// Same as [`Easy2::max_recv_speed`](struct.Easy2.html#method.max_recv_speed) ++ pub fn max_recv_speed(&mut self, speed: u64) -> Result<(), Error> { ++ self.inner.max_recv_speed(speed) ++ } ++ ++ /// Same as [`Easy2::max_connects`](struct.Easy2.html#method.max_connects) ++ pub fn max_connects(&mut self, max: u32) -> Result<(), Error> { ++ self.inner.max_connects(max) ++ } ++ ++ /// Same as [`Easy2::fresh_connect`](struct.Easy2.html#method.fresh_connect) ++ pub fn fresh_connect(&mut self, enable: bool) -> Result<(), Error> { ++ self.inner.fresh_connect(enable) ++ } ++ ++ /// Same as [`Easy2::forbid_reuse`](struct.Easy2.html#method.forbid_reuse) ++ pub fn forbid_reuse(&mut 
self, enable: bool) -> Result<(), Error> { ++ self.inner.forbid_reuse(enable) ++ } ++ ++ /// Same as [`Easy2::connect_timeout`](struct.Easy2.html#method.connect_timeout) ++ pub fn connect_timeout(&mut self, timeout: Duration) -> Result<(), Error> { ++ self.inner.connect_timeout(timeout) ++ } ++ ++ /// Same as [`Easy2::ip_resolve`](struct.Easy2.html#method.ip_resolve) ++ pub fn ip_resolve(&mut self, resolve: IpResolve) -> Result<(), Error> { ++ self.inner.ip_resolve(resolve) ++ } ++ ++ /// Same as [`Easy2::resolve`](struct.Easy2.html#method.resolve) ++ pub fn resolve(&mut self, list: List) -> Result<(), Error> { ++ self.inner.resolve(list) ++ } ++ ++ /// Same as [`Easy2::connect_only`](struct.Easy2.html#method.connect_only) ++ pub fn connect_only(&mut self, enable: bool) -> Result<(), Error> { ++ self.inner.connect_only(enable) ++ } ++ ++ // ========================================================================= ++ // SSL/Security Options ++ ++ /// Same as [`Easy2::ssl_cert`](struct.Easy2.html#method.ssl_cert) ++ pub fn ssl_cert>(&mut self, cert: P) -> Result<(), Error> { ++ self.inner.ssl_cert(cert) ++ } ++ ++ /// Same as [`Easy2::ssl_cert_type`](struct.Easy2.html#method.ssl_cert_type) ++ pub fn ssl_cert_type(&mut self, kind: &str) -> Result<(), Error> { ++ self.inner.ssl_cert_type(kind) ++ } ++ ++ /// Same as [`Easy2::ssl_key`](struct.Easy2.html#method.ssl_key) ++ pub fn ssl_key>(&mut self, key: P) -> Result<(), Error> { ++ self.inner.ssl_key(key) ++ } ++ ++ /// Same as [`Easy2::ssl_key_type`](struct.Easy2.html#method.ssl_key_type) ++ pub fn ssl_key_type(&mut self, kind: &str) -> Result<(), Error> { ++ self.inner.ssl_key_type(kind) ++ } ++ ++ /// Same as [`Easy2::key_password`](struct.Easy2.html#method.key_password) ++ pub fn key_password(&mut self, password: &str) -> Result<(), Error> { ++ self.inner.key_password(password) ++ } ++ ++ /// Same as [`Easy2::ssl_engine`](struct.Easy2.html#method.ssl_engine) ++ pub fn ssl_engine(&mut self, engine: &str) -> Result<(), Error> { ++ self.inner.ssl_engine(engine) ++ } ++ ++ /// Same as [`Easy2::ssl_engine_default`](struct.Easy2.html#method.ssl_engine_default) ++ pub fn ssl_engine_default(&mut self, enable: bool) -> Result<(), Error> { ++ self.inner.ssl_engine_default(enable) ++ } ++ ++ /// Same as [`Easy2::http_version`](struct.Easy2.html#method.http_version) ++ pub fn http_version(&mut self, version: HttpVersion) -> Result<(), Error> { ++ self.inner.http_version(version) ++ } ++ ++ /// Same as [`Easy2::ssl_version`](struct.Easy2.html#method.ssl_version) ++ pub fn ssl_version(&mut self, version: SslVersion) -> Result<(), Error> { ++ self.inner.ssl_version(version) ++ } ++ ++ /// Same as [`Easy2::ssl_verify_host`](struct.Easy2.html#method.ssl_verify_host) ++ pub fn ssl_verify_host(&mut self, verify: bool) -> Result<(), Error> { ++ self.inner.ssl_verify_host(verify) ++ } ++ ++ /// Same as [`Easy2::ssl_verify_peer`](struct.Easy2.html#method.ssl_verify_peer) ++ pub fn ssl_verify_peer(&mut self, verify: bool) -> Result<(), Error> { ++ self.inner.ssl_verify_peer(verify) ++ } ++ ++ /// Same as [`Easy2::cainfo`](struct.Easy2.html#method.cainfo) ++ pub fn cainfo>(&mut self, path: P) -> Result<(), Error> { ++ self.inner.cainfo(path) ++ } ++ ++ /// Same as [`Easy2::issuer_cert`](struct.Easy2.html#method.issuer_cert) ++ pub fn issuer_cert>(&mut self, path: P) -> Result<(), Error> { ++ self.inner.issuer_cert(path) ++ } ++ ++ /// Same as [`Easy2::capath`](struct.Easy2.html#method.capath) ++ pub fn capath>(&mut self, path: P) -> Result<(), Error> { ++ 
self.inner.capath(path) ++ } ++ ++ /// Same as [`Easy2::crlfile`](struct.Easy2.html#method.crlfile) ++ pub fn crlfile>(&mut self, path: P) -> Result<(), Error> { ++ self.inner.crlfile(path) ++ } ++ ++ /// Same as [`Easy2::certinfo`](struct.Easy2.html#method.certinfo) ++ pub fn certinfo(&mut self, enable: bool) -> Result<(), Error> { ++ self.inner.certinfo(enable) ++ } ++ ++ /// Same as [`Easy2::random_file`](struct.Easy2.html#method.random_file) ++ pub fn random_file>(&mut self, p: P) -> Result<(), Error> { ++ self.inner.random_file(p) ++ } ++ ++ /// Same as [`Easy2::egd_socket`](struct.Easy2.html#method.egd_socket) ++ pub fn egd_socket>(&mut self, p: P) -> Result<(), Error> { ++ self.inner.egd_socket(p) ++ } ++ ++ /// Same as [`Easy2::ssl_cipher_list`](struct.Easy2.html#method.ssl_cipher_list) ++ pub fn ssl_cipher_list(&mut self, ciphers: &str) -> Result<(), Error> { ++ self.inner.ssl_cipher_list(ciphers) ++ } ++ ++ /// Same as [`Easy2::ssl_sessionid_cache`](struct.Easy2.html#method.ssl_sessionid_cache) ++ pub fn ssl_sessionid_cache(&mut self, enable: bool) -> Result<(), Error> { ++ self.inner.ssl_sessionid_cache(enable) ++ } ++ ++ /// Same as [`Easy2::ssl_options`](struct.Easy2.html#method.ssl_options) ++ pub fn ssl_options(&mut self, bits: &SslOpt) -> Result<(), Error> { ++ self.inner.ssl_options(bits) ++ } ++ ++ // ========================================================================= ++ // getters ++ ++ /// Same as [`Easy2::time_condition_unmet`](struct.Easy2.html#method.time_condition_unmet) ++ pub fn time_condition_unmet(&mut self) -> Result { ++ self.inner.time_condition_unmet() ++ } ++ ++ /// Same as [`Easy2::effective_url`](struct.Easy2.html#method.effective_url) ++ pub fn effective_url(&mut self) -> Result, Error> { ++ self.inner.effective_url() ++ } ++ ++ /// Same as [`Easy2::effective_url_bytes`](struct.Easy2.html#method.effective_url_bytes) ++ pub fn effective_url_bytes(&mut self) -> Result, Error> { ++ self.inner.effective_url_bytes() ++ } ++ ++ /// Same as [`Easy2::response_code`](struct.Easy2.html#method.response_code) ++ pub fn response_code(&mut self) -> Result { ++ self.inner.response_code() ++ } ++ ++ /// Same as [`Easy2::http_connectcode`](struct.Easy2.html#method.http_connectcode) ++ pub fn http_connectcode(&mut self) -> Result { ++ self.inner.http_connectcode() ++ } ++ ++ /// Same as [`Easy2::filetime`](struct.Easy2.html#method.filetime) ++ pub fn filetime(&mut self) -> Result, Error> { ++ self.inner.filetime() ++ } ++ ++ /// Same as [`Easy2::download_size`](struct.Easy2.html#method.download_size) ++ pub fn download_size(&mut self) -> Result { ++ self.inner.download_size() ++ } ++ /// Same as [`Easy2::content_length_download`](struct.Easy2.html#method.content_length_download) ++ pub fn content_length_download(&mut self) -> Result { ++ self.inner.content_length_download() ++ } ++ ++ /// Same as [`Easy2::total_time`](struct.Easy2.html#method.total_time) ++ pub fn total_time(&mut self) -> Result { ++ self.inner.total_time() ++ } ++ ++ /// Same as [`Easy2::namelookup_time`](struct.Easy2.html#method.namelookup_time) ++ pub fn namelookup_time(&mut self) -> Result { ++ self.inner.namelookup_time() ++ } ++ ++ /// Same as [`Easy2::connect_time`](struct.Easy2.html#method.connect_time) ++ pub fn connect_time(&mut self) -> Result { ++ self.inner.connect_time() ++ } ++ ++ /// Same as [`Easy2::appconnect_time`](struct.Easy2.html#method.appconnect_time) ++ pub fn appconnect_time(&mut self) -> Result { ++ self.inner.appconnect_time() ++ } ++ ++ /// Same as 
[`Easy2::pretransfer_time`](struct.Easy2.html#method.pretransfer_time) ++ pub fn pretransfer_time(&mut self) -> Result { ++ self.inner.pretransfer_time() ++ } ++ ++ /// Same as [`Easy2::starttransfer_time`](struct.Easy2.html#method.starttransfer_time) ++ pub fn starttransfer_time(&mut self) -> Result { ++ self.inner.starttransfer_time() ++ } ++ ++ /// Same as [`Easy2::redirect_time`](struct.Easy2.html#method.redirect_time) ++ pub fn redirect_time(&mut self) -> Result { ++ self.inner.redirect_time() ++ } ++ ++ /// Same as [`Easy2::redirect_count`](struct.Easy2.html#method.redirect_count) ++ pub fn redirect_count(&mut self) -> Result { ++ self.inner.redirect_count() ++ } ++ ++ /// Same as [`Easy2::redirect_url`](struct.Easy2.html#method.redirect_url) ++ pub fn redirect_url(&mut self) -> Result, Error> { ++ self.inner.redirect_url() ++ } ++ ++ /// Same as [`Easy2::redirect_url_bytes`](struct.Easy2.html#method.redirect_url_bytes) ++ pub fn redirect_url_bytes(&mut self) -> Result, Error> { ++ self.inner.redirect_url_bytes() ++ } ++ ++ /// Same as [`Easy2::header_size`](struct.Easy2.html#method.header_size) ++ pub fn header_size(&mut self) -> Result { ++ self.inner.header_size() ++ } ++ ++ /// Same as [`Easy2::request_size`](struct.Easy2.html#method.request_size) ++ pub fn request_size(&mut self) -> Result { ++ self.inner.request_size() ++ } ++ ++ /// Same as [`Easy2::content_type`](struct.Easy2.html#method.content_type) ++ pub fn content_type(&mut self) -> Result, Error> { ++ self.inner.content_type() ++ } ++ ++ /// Same as [`Easy2::content_type_bytes`](struct.Easy2.html#method.content_type_bytes) ++ pub fn content_type_bytes(&mut self) -> Result, Error> { ++ self.inner.content_type_bytes() ++ } ++ ++ /// Same as [`Easy2::os_errno`](struct.Easy2.html#method.os_errno) ++ pub fn os_errno(&mut self) -> Result { ++ self.inner.os_errno() ++ } ++ ++ /// Same as [`Easy2::primary_ip`](struct.Easy2.html#method.primary_ip) ++ pub fn primary_ip(&mut self) -> Result, Error> { ++ self.inner.primary_ip() ++ } ++ ++ /// Same as [`Easy2::primary_port`](struct.Easy2.html#method.primary_port) ++ pub fn primary_port(&mut self) -> Result { ++ self.inner.primary_port() ++ } ++ ++ /// Same as [`Easy2::local_ip`](struct.Easy2.html#method.local_ip) ++ pub fn local_ip(&mut self) -> Result, Error> { ++ self.inner.local_ip() ++ } ++ ++ /// Same as [`Easy2::local_port`](struct.Easy2.html#method.local_port) ++ pub fn local_port(&mut self) -> Result { ++ self.inner.local_port() ++ } ++ ++ /// Same as [`Easy2::cookies`](struct.Easy2.html#method.cookies) ++ pub fn cookies(&mut self) -> Result { ++ self.inner.cookies() ++ } ++ ++ // ========================================================================= ++ // Other methods ++ ++ /// Same as [`Easy2::perform`](struct.Easy2.html#method.perform) ++ pub fn perform(&self) -> Result<(), Error> { ++ assert!(self.inner.get_ref().borrowed.get().is_null()); ++ self.do_perform() ++ } ++ ++ fn do_perform(&self) -> Result<(), Error> { ++ // We don't allow recursive invocations of `perform` because we're ++ // invoking `FnMut`closures behind a `&self` pointer. This flag acts as ++ // our own `RefCell` borrow flag sorta. 
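++ //
++ // The `Reset` guard constructed below clears that flag in its `Drop`
++ // impl, so the handle becomes usable again even if a callback panics
++ // while the transfer is running.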
++ if self.inner.get_ref().running.get() { ++ return Err(Error::new(curl_sys::CURLE_FAILED_INIT)) ++ } ++ ++ self.inner.get_ref().running.set(true); ++ struct Reset<'a>(&'a Cell); ++ impl<'a> Drop for Reset<'a> { ++ fn drop(&mut self) { ++ self.0.set(false); ++ } ++ } ++ let _reset = Reset(&self.inner.get_ref().running); ++ ++ self.inner.perform() ++ } ++ ++ /// Creates a new scoped transfer which can be used to set callbacks and ++ /// data which only live for the scope of the returned object. ++ /// ++ /// An `Easy` handle is often reused between different requests to cache ++ /// connections to servers, but often the lifetime of the data as part of ++ /// each transfer is unique. This function serves as an ability to share an ++ /// `Easy` across many transfers while ergonomically using possibly ++ /// stack-local data as part of each transfer. ++ /// ++ /// Configuration can be set on the `Easy` and then a `Transfer` can be ++ /// created to set scoped configuration (like callbacks). Finally, the ++ /// `perform` method on the `Transfer` function can be used. ++ /// ++ /// When the `Transfer` option is dropped then all configuration set on the ++ /// transfer itself will be reset. ++ pub fn transfer<'data, 'easy>(&'easy mut self) -> Transfer<'easy, 'data> { ++ assert!(!self.inner.get_ref().running.get()); ++ Transfer { ++ data: Box::new(Callbacks::default()), ++ easy: self, ++ } ++ } ++ ++ /// Same as [`Easy2::unpause_read`](struct.Easy2.html#method.unpause_read) ++ pub fn unpause_read(&self) -> Result<(), Error> { ++ self.inner.unpause_read() ++ } ++ ++ /// Same as [`Easy2::unpause_write`](struct.Easy2.html#method.unpause_write) ++ pub fn unpause_write(&self) -> Result<(), Error> { ++ self.inner.unpause_write() ++ } ++ ++ /// Same as [`Easy2::url_encode`](struct.Easy2.html#method.url_encode) ++ pub fn url_encode(&mut self, s: &[u8]) -> String { ++ self.inner.url_encode(s) ++ } ++ ++ /// Same as [`Easy2::url_decode`](struct.Easy2.html#method.url_decode) ++ pub fn url_decode(&mut self, s: &str) -> Vec { ++ self.inner.url_decode(s) ++ } ++ ++ /// Same as [`Easy2::reset`](struct.Easy2.html#method.reset) ++ pub fn reset(&mut self) { ++ self.inner.reset() ++ } ++ ++ /// Same as [`Easy2::recv`](struct.Easy2.html#method.recv) ++ pub fn recv(&mut self, data: &mut [u8]) -> Result { ++ self.inner.recv(data) ++ } ++ ++ /// Same as [`Easy2::send`](struct.Easy2.html#method.send) ++ pub fn send(&mut self, data: &[u8]) -> Result { ++ self.inner.send(data) ++ } ++ ++ /// Same as [`Easy2::raw`](struct.Easy2.html#method.raw) ++ pub fn raw(&self) -> *mut curl_sys::CURL { ++ self.inner.raw() ++ } ++} ++ ++impl EasyData { ++ /// An unsafe function to get the appropriate callback field. ++ /// ++ /// We can have callbacks configured from one of two different sources. ++ /// We could either have a callback from the `borrowed` field, callbacks on ++ /// an ephemeral `Transfer`, or the `owned` field which are `'static` ++ /// callbacks that live for the lifetime of this `EasyData`. ++ /// ++ /// The first set of callbacks are unsafe to access because they're actually ++ /// owned elsewhere and we're just aliasing. Additionally they don't ++ /// technically live long enough for us to access them, so they're hidden ++ /// behind unsafe pointers and casts. ++ /// ++ /// This function returns `&'a mut T` but that's actually somewhat of a lie. ++ /// The value should **not be stored to** nor should it be used for the full ++ /// lifetime of `'a`, but rather immediately in the local scope. 
++ /// ++ /// Basically this is just intended to acquire a callback, invoke it, and ++ /// then stop. Nothing else. Super unsafe. ++ unsafe fn callback<'a, T, F>(&'a mut self, f: F) -> Option<&'a mut T> ++ where F: for<'b> Fn(&'b mut Callbacks<'static>) -> &'b mut Option, ++ { ++ let ptr = self.borrowed.get(); ++ if !ptr.is_null() { ++ let val = f(&mut *ptr); ++ if val.is_some() { ++ return val.as_mut() ++ } ++ } ++ f(&mut self.owned).as_mut() ++ } ++} ++ ++impl Handler for EasyData { ++ fn write(&mut self, data: &[u8]) -> Result { ++ unsafe { ++ match self.callback(|s| &mut s.write) { ++ Some(write) => write(data), ++ None => Ok(data.len()), ++ } ++ } ++ } ++ ++ fn read(&mut self, data: &mut [u8]) -> Result { ++ unsafe { ++ match self.callback(|s| &mut s.read) { ++ Some(read) => read(data), ++ None => Ok(0), ++ } ++ } ++ } ++ ++ fn seek(&mut self, whence: SeekFrom) -> SeekResult { ++ unsafe { ++ match self.callback(|s| &mut s.seek) { ++ Some(seek) => seek(whence), ++ None => SeekResult::CantSeek, ++ } ++ } ++ } ++ ++ fn debug(&mut self, kind: InfoType, data: &[u8]) { ++ unsafe { ++ match self.callback(|s| &mut s.debug) { ++ Some(debug) => debug(kind, data), ++ None => handler::debug(kind, data), ++ } ++ } ++ } ++ ++ fn header(&mut self, data: &[u8]) -> bool { ++ unsafe { ++ match self.callback(|s| &mut s.header) { ++ Some(header) => header(data), ++ None => true, ++ } ++ } ++ } ++ ++ fn progress(&mut self, ++ dltotal: f64, ++ dlnow: f64, ++ ultotal: f64, ++ ulnow: f64) -> bool { ++ unsafe { ++ match self.callback(|s| &mut s.progress) { ++ Some(progress) => progress(dltotal, dlnow, ultotal, ulnow), ++ None => true, ++ } ++ } ++ } ++ ++ fn ssl_ctx(&mut self, cx: *mut c_void) -> Result<(), Error> { ++ unsafe { ++ match self.callback(|s| &mut s.ssl_ctx) { ++ Some(ssl_ctx) => ssl_ctx(cx), ++ None => handler::ssl_ctx(cx), ++ } ++ } ++ } ++} ++ ++impl fmt::Debug for EasyData { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ "callbacks ...".fmt(f) ++ } ++} ++ ++impl<'easy, 'data> Transfer<'easy, 'data> { ++ /// Same as `Easy::write_function`, just takes a non `'static` lifetime ++ /// corresponding to the lifetime of this transfer. ++ pub fn write_function(&mut self, f: F) -> Result<(), Error> ++ where F: FnMut(&[u8]) -> Result + 'data ++ { ++ self.data.write = Some(Box::new(f)); ++ Ok(()) ++ } ++ ++ /// Same as `Easy::read_function`, just takes a non `'static` lifetime ++ /// corresponding to the lifetime of this transfer. ++ pub fn read_function(&mut self, f: F) -> Result<(), Error> ++ where F: FnMut(&mut [u8]) -> Result + 'data ++ { ++ self.data.read = Some(Box::new(f)); ++ Ok(()) ++ } ++ ++ /// Same as `Easy::seek_function`, just takes a non `'static` lifetime ++ /// corresponding to the lifetime of this transfer. ++ pub fn seek_function(&mut self, f: F) -> Result<(), Error> ++ where F: FnMut(SeekFrom) -> SeekResult + 'data ++ { ++ self.data.seek = Some(Box::new(f)); ++ Ok(()) ++ } ++ ++ /// Same as `Easy::progress_function`, just takes a non `'static` lifetime ++ /// corresponding to the lifetime of this transfer. ++ pub fn progress_function(&mut self, f: F) -> Result<(), Error> ++ where F: FnMut(f64, f64, f64, f64) -> bool + 'data ++ { ++ self.data.progress = Some(Box::new(f)); ++ Ok(()) ++ } ++ ++ /// Same as `Easy::ssl_ctx_function`, just takes a non `'static` ++ /// lifetime corresponding to the lifetime of this transfer. 
++ pub fn ssl_ctx_function(&mut self, f: F) -> Result<(), Error> ++ where F: FnMut(*mut c_void) -> Result<(), Error> + Send + 'data ++ { ++ self.data.ssl_ctx = Some(Box::new(f)); ++ Ok(()) ++ } ++ ++ /// Same as `Easy::debug_function`, just takes a non `'static` lifetime ++ /// corresponding to the lifetime of this transfer. ++ pub fn debug_function(&mut self, f: F) -> Result<(), Error> ++ where F: FnMut(InfoType, &[u8]) + 'data ++ { ++ self.data.debug = Some(Box::new(f)); ++ Ok(()) ++ } ++ ++ /// Same as `Easy::header_function`, just takes a non `'static` lifetime ++ /// corresponding to the lifetime of this transfer. ++ pub fn header_function(&mut self, f: F) -> Result<(), Error> ++ where F: FnMut(&[u8]) -> bool + 'data ++ { ++ self.data.header = Some(Box::new(f)); ++ Ok(()) ++ } ++ ++ /// Same as `Easy::transfer`. ++ pub fn perform(&self) -> Result<(), Error> { ++ let inner = self.easy.inner.get_ref(); ++ ++ // Note that we're casting a `&self` pointer to a `*mut`, and then ++ // during the invocation of this call we're going to invoke `FnMut` ++ // closures that we ourselves own. ++ // ++ // This should be ok, however, because `do_perform` checks for recursive ++ // invocations of `perform` and disallows them. Our type also isn't ++ // `Sync`. ++ inner.borrowed.set(&*self.data as *const _ as *mut _); ++ ++ // Make sure to reset everything back to the way it was before when ++ // we're done. ++ struct Reset<'a>(&'a Cell<*mut Callbacks<'static>>); ++ impl<'a> Drop for Reset<'a> { ++ fn drop(&mut self) { ++ self.0.set(ptr::null_mut()); ++ } ++ } ++ let _reset = Reset(&inner.borrowed); ++ ++ self.easy.do_perform() ++ } ++ ++ /// Same as `Easy::unpause_read`. ++ pub fn unpause_read(&self) -> Result<(), Error> { ++ self.easy.unpause_read() ++ } ++ ++ /// Same as `Easy::unpause_write` ++ pub fn unpause_write(&self) -> Result<(), Error> { ++ self.easy.unpause_write() ++ } ++} ++ ++impl<'easy, 'data> fmt::Debug for Transfer<'easy, 'data> { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ f.debug_struct("Transfer") ++ .field("easy", &self.easy) ++ .finish() ++ } ++} ++ ++impl<'easy, 'data> Drop for Transfer<'easy, 'data> { ++ fn drop(&mut self) { ++ // Extra double check to make sure we don't leak a pointer to ourselves. ++ assert!(self.easy.inner.get_ref().borrowed.get().is_null()); ++ } ++} diff --cc vendor/curl-0.4.14/src/easy/handler.rs index 000000000,000000000..7b3db7907 new file mode 100644 --- /dev/null +++ b/vendor/curl-0.4.14/src/easy/handler.rs @@@ -1,0 -1,0 +1,3185 @@@ ++use std::cell::RefCell; ++use std::ffi::{CStr, CString}; ++use std::fmt; ++use std::io::{self, SeekFrom, Write}; ++use std::path::Path; ++use std::slice; ++use std::str; ++use std::time::Duration; ++ ++use curl_sys; ++use libc::{self, c_void, c_char, c_long, size_t, c_int, c_double, c_ulong}; ++use socket2::Socket; ++ ++use Error; ++use easy::form; ++use easy::list; ++use easy::{List, Form}; ++use easy::windows; ++use panic; ++ ++/// A trait for the various callbacks used by libcurl to invoke user code. ++/// ++/// This trait represents all operations that libcurl can possibly invoke a ++/// client for code during an HTTP transaction. Each callback has a default ++/// "noop" implementation, the same as in libcurl. Types implementing this trait ++/// may simply override the relevant functions to learn about the callbacks ++/// they're interested in. 
++/// ++/// # Examples ++/// ++/// ``` ++/// use curl::easy::{Easy2, Handler, WriteError}; ++/// ++/// struct Collector(Vec); ++/// ++/// impl Handler for Collector { ++/// fn write(&mut self, data: &[u8]) -> Result { ++/// self.0.extend_from_slice(data); ++/// Ok(data.len()) ++/// } ++/// } ++/// ++/// let mut easy = Easy2::new(Collector(Vec::new())); ++/// easy.get(true).unwrap(); ++/// easy.url("https://www.rust-lang.org/").unwrap(); ++/// easy.perform().unwrap(); ++/// ++/// assert_eq!(easy.response_code().unwrap(), 200); ++/// let contents = easy.get_ref(); ++/// println!("{}", String::from_utf8_lossy(&contents.0)); ++/// ``` ++pub trait Handler { ++ /// Callback invoked whenever curl has downloaded data for the application. ++ /// ++ /// This callback function gets called by libcurl as soon as there is data ++ /// received that needs to be saved. ++ /// ++ /// The callback function will be passed as much data as possible in all ++ /// invokes, but you must not make any assumptions. It may be one byte, it ++ /// may be thousands. If `show_header` is enabled, which makes header data ++ /// get passed to the write callback, you can get up to ++ /// `CURL_MAX_HTTP_HEADER` bytes of header data passed into it. This ++ /// usually means 100K. ++ /// ++ /// This function may be called with zero bytes data if the transferred file ++ /// is empty. ++ /// ++ /// The callback should return the number of bytes actually taken care of. ++ /// If that amount differs from the amount passed to your callback function, ++ /// it'll signal an error condition to the library. This will cause the ++ /// transfer to get aborted and the libcurl function used will return ++ /// an error with `is_write_error`. ++ /// ++ /// If your callback function returns `Err(WriteError::Pause)` it will cause ++ /// this transfer to become paused. See `unpause_write` for further details. ++ /// ++ /// By default data is sent into the void, and this corresponds to the ++ /// `CURLOPT_WRITEFUNCTION` and `CURLOPT_WRITEDATA` options. ++ fn write(&mut self, data: &[u8]) -> Result { ++ Ok(data.len()) ++ } ++ ++ /// Read callback for data uploads. ++ /// ++ /// This callback function gets called by libcurl as soon as it needs to ++ /// read data in order to send it to the peer - like if you ask it to upload ++ /// or post data to the server. ++ /// ++ /// Your function must then return the actual number of bytes that it stored ++ /// in that memory area. Returning 0 will signal end-of-file to the library ++ /// and cause it to stop the current transfer. ++ /// ++ /// If you stop the current transfer by returning 0 "pre-maturely" (i.e ++ /// before the server expected it, like when you've said you will upload N ++ /// bytes and you upload less than N bytes), you may experience that the ++ /// server "hangs" waiting for the rest of the data that won't come. ++ /// ++ /// The read callback may return `Err(ReadError::Abort)` to stop the ++ /// current operation immediately, resulting in a `is_aborted_by_callback` ++ /// error code from the transfer. ++ /// ++ /// The callback can return `Err(ReadError::Pause)` to cause reading from ++ /// this connection to pause. See `unpause_read` for further details. ++ /// ++ /// By default data not input, and this corresponds to the ++ /// `CURLOPT_READFUNCTION` and `CURLOPT_READDATA` options. ++ /// ++ /// Note that the lifetime bound on this function is `'static`, but that ++ /// is often too restrictive. 
To use stack data consider calling the ++ /// `transfer` method and then using `read_function` to configure a ++ /// callback that can reference stack-local data. ++ fn read(&mut self, data: &mut [u8]) -> Result { ++ drop(data); ++ Ok(0) ++ } ++ ++ /// User callback for seeking in input stream. ++ /// ++ /// This function gets called by libcurl to seek to a certain position in ++ /// the input stream and can be used to fast forward a file in a resumed ++ /// upload (instead of reading all uploaded bytes with the normal read ++ /// function/callback). It is also called to rewind a stream when data has ++ /// already been sent to the server and needs to be sent again. This may ++ /// happen when doing a HTTP PUT or POST with a multi-pass authentication ++ /// method, or when an existing HTTP connection is reused too late and the ++ /// server closes the connection. ++ /// ++ /// The callback function must return `SeekResult::Ok` on success, ++ /// `SeekResult::Fail` to cause the upload operation to fail or ++ /// `SeekResult::CantSeek` to indicate that while the seek failed, libcurl ++ /// is free to work around the problem if possible. The latter can sometimes ++ /// be done by instead reading from the input or similar. ++ /// ++ /// By default data this option is not set, and this corresponds to the ++ /// `CURLOPT_SEEKFUNCTION` and `CURLOPT_SEEKDATA` options. ++ fn seek(&mut self, whence: SeekFrom) -> SeekResult { ++ drop(whence); ++ SeekResult::CantSeek ++ } ++ ++ /// Specify a debug callback ++ /// ++ /// `debug_function` replaces the standard debug function used when ++ /// `verbose` is in effect. This callback receives debug information, ++ /// as specified in the type argument. ++ /// ++ /// By default this option is not set and corresponds to the ++ /// `CURLOPT_DEBUGFUNCTION` and `CURLOPT_DEBUGDATA` options. ++ fn debug(&mut self, kind: InfoType, data: &[u8]) { ++ debug(kind, data) ++ } ++ ++ /// Callback that receives header data ++ /// ++ /// This function gets called by libcurl as soon as it has received header ++ /// data. The header callback will be called once for each header and only ++ /// complete header lines are passed on to the callback. Parsing headers is ++ /// very easy using this. If this callback returns `false` it'll signal an ++ /// error to the library. This will cause the transfer to get aborted and ++ /// the libcurl function in progress will return `is_write_error`. ++ /// ++ /// A complete HTTP header that is passed to this function can be up to ++ /// CURL_MAX_HTTP_HEADER (100K) bytes. ++ /// ++ /// It's important to note that the callback will be invoked for the headers ++ /// of all responses received after initiating a request and not just the ++ /// final response. This includes all responses which occur during ++ /// authentication negotiation. If you need to operate on only the headers ++ /// from the final response, you will need to collect headers in the ++ /// callback yourself and use HTTP status lines, for example, to delimit ++ /// response boundaries. ++ /// ++ /// When a server sends a chunked encoded transfer, it may contain a ++ /// trailer. That trailer is identical to a HTTP header and if such a ++ /// trailer is received it is passed to the application using this callback ++ /// as well. There are several ways to detect it being a trailer and not an ++ /// ordinary header: 1) it comes after the response-body. 
2) it comes after ++ /// the final header line (CR LF) 3) a Trailer: header among the regular ++ /// response-headers mention what header(s) to expect in the trailer. ++ /// ++ /// For non-HTTP protocols like FTP, POP3, IMAP and SMTP this function will ++ /// get called with the server responses to the commands that libcurl sends. ++ /// ++ /// By default this option is not set and corresponds to the ++ /// `CURLOPT_HEADERFUNCTION` and `CURLOPT_HEADERDATA` options. ++ fn header(&mut self, data: &[u8]) -> bool { ++ drop(data); ++ true ++ } ++ ++ /// Callback to progress meter function ++ /// ++ /// This function gets called by libcurl instead of its internal equivalent ++ /// with a frequent interval. While data is being transferred it will be ++ /// called very frequently, and during slow periods like when nothing is ++ /// being transferred it can slow down to about one call per second. ++ /// ++ /// The callback gets told how much data libcurl will transfer and has ++ /// transferred, in number of bytes. The first argument is the total number ++ /// of bytes libcurl expects to download in this transfer. The second ++ /// argument is the number of bytes downloaded so far. The third argument is ++ /// the total number of bytes libcurl expects to upload in this transfer. ++ /// The fourth argument is the number of bytes uploaded so far. ++ /// ++ /// Unknown/unused argument values passed to the callback will be set to ++ /// zero (like if you only download data, the upload size will remain 0). ++ /// Many times the callback will be called one or more times first, before ++ /// it knows the data sizes so a program must be made to handle that. ++ /// ++ /// Returning `false` from this callback will cause libcurl to abort the ++ /// transfer and return `is_aborted_by_callback`. ++ /// ++ /// If you transfer data with the multi interface, this function will not be ++ /// called during periods of idleness unless you call the appropriate ++ /// libcurl function that performs transfers. ++ /// ++ /// `progress` must be set to `true` to make this function actually get ++ /// called. ++ /// ++ /// By default this function calls an internal method and corresponds to ++ /// `CURLOPT_PROGRESSFUNCTION` and `CURLOPT_PROGRESSDATA`. ++ fn progress(&mut self, ++ dltotal: f64, ++ dlnow: f64, ++ ultotal: f64, ++ ulnow: f64) -> bool { ++ drop((dltotal, dlnow, ultotal, ulnow)); ++ true ++ } ++ ++ /// Callback to SSL context ++ /// ++ /// This callback function gets called by libcurl just before the ++ /// initialization of an SSL connection after having processed all ++ /// other SSL related options to give a last chance to an ++ /// application to modify the behaviour of the SSL ++ /// initialization. The `ssl_ctx` parameter is actually a pointer ++ /// to the SSL library's SSL_CTX. If an error is returned from the ++ /// callback no attempt to establish a connection is made and the ++ /// perform operation will return the callback's error code. ++ /// ++ /// This function will get called on all new connections made to a ++ /// server, during the SSL negotiation. The SSL_CTX pointer will ++ /// be a new one every time. ++ /// ++ /// To use this properly, a non-trivial amount of knowledge of ++ /// your SSL library is necessary. For example, you can use this ++ /// function to call library-specific callbacks to add additional ++ /// validation code for certificates, and even to change the ++ /// actual URI of a HTTPS request. 
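++ ///
++ /// As an illustrative sketch (not from the upstream docs, and assuming
++ /// the `libc` crate is visible to the doctest, as it is a dependency of
++ /// this crate), a handler could override this hook just to observe when
++ /// TLS setup happens:
++ ///
++ /// ```no_run
++ /// extern crate curl;
++ /// extern crate libc;
++ ///
++ /// use curl::easy::Handler;
++ /// use libc::c_void;
++ ///
++ /// struct Inspector;
++ ///
++ /// impl Handler for Inspector {
++ ///     fn ssl_ctx(&mut self, cx: *mut c_void) -> Result<(), curl::Error> {
++ ///         // Overriding this hook replaces the default behavior, which on
++ ///         // Windows adds the system certificate store to OpenSSL.
++ ///         println!("TLS context being initialized: {:?}", cx);
++ ///         Ok(())
++ ///     }
++ /// }
++ /// # fn main() {}
++ /// ```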
++ /// ++ /// By default this function calls an internal method and ++ /// corresponds to `CURLOPT_SSL_CTX_FUNCTION` and ++ /// `CURLOPT_SSL_CTX_DATA`. ++ /// ++ /// Note that this callback is not guaranteed to be called, not all versions ++ /// of libcurl support calling this callback. ++ fn ssl_ctx(&mut self, cx: *mut c_void) -> Result<(), Error> { ++ // By default, if we're on an OpenSSL enabled libcurl and we're on ++ // Windows, add the system's certificate store to OpenSSL's certificate ++ // store. ++ ssl_ctx(cx) ++ } ++ ++ /// Callback to open sockets for libcurl. ++ /// ++ /// This callback function gets called by libcurl instead of the socket(2) ++ /// call. The callback function should return the newly created socket ++ /// or `None` in case no connection could be established or another ++ /// error was detected. Any additional `setsockopt(2)` calls can of course ++ /// be done on the socket at the user's discretion. A `None` return ++ /// value from the callback function will signal an unrecoverable error to ++ /// libcurl and it will return `is_couldnt_connect` from the function that ++ /// triggered this callback. ++ /// ++ /// By default this function opens a standard socket and ++ /// corresponds to `CURLOPT_OPENSOCKETFUNCTION `. ++ fn open_socket(&mut self, ++ family: c_int, ++ socktype: c_int, ++ protocol: c_int) -> Option { ++ // Note that we override this to calling a function in `socket2` to ++ // ensure that we open all sockets with CLOEXEC. Otherwise if we rely on ++ // libcurl to open sockets it won't use CLOEXEC. ++ return Socket::new(family.into(), socktype.into(), Some(protocol.into())) ++ .ok() ++ .map(cvt); ++ ++ #[cfg(unix)] ++ fn cvt(socket: Socket) -> curl_sys::curl_socket_t { ++ use std::os::unix::prelude::*; ++ socket.into_raw_fd() ++ } ++ ++ #[cfg(windows)] ++ fn cvt(socket: Socket) -> curl_sys::curl_socket_t { ++ use std::os::windows::prelude::*; ++ socket.into_raw_socket() ++ } ++ } ++} ++ ++pub fn debug(kind: InfoType, data: &[u8]) { ++ let out = io::stderr(); ++ let prefix = match kind { ++ InfoType::Text => "*", ++ InfoType::HeaderIn => "<", ++ InfoType::HeaderOut => ">", ++ InfoType::DataIn | ++ InfoType::SslDataIn => "{", ++ InfoType::DataOut | ++ InfoType::SslDataOut => "}", ++ InfoType::__Nonexhaustive => " ", ++ }; ++ let mut out = out.lock(); ++ drop(write!(out, "{} ", prefix)); ++ drop(out.write_all(data)); ++} ++ ++pub fn ssl_ctx(cx: *mut c_void) -> Result<(), Error> { ++ windows::add_certs_to_context(cx); ++ Ok(()) ++} ++ ++/// Raw bindings to a libcurl "easy session". ++/// ++/// This type corresponds to the `CURL` type in libcurl, and is probably what ++/// you want for just sending off a simple HTTP request and fetching a response. ++/// Each easy handle can be thought of as a large builder before calling the ++/// final `perform` function. ++/// ++/// There are many many configuration options for each `Easy2` handle, and they ++/// should all have their own documentation indicating what it affects and how ++/// it interacts with other options. Some implementations of libcurl can use ++/// this handle to interact with many different protocols, although by default ++/// this crate only guarantees the HTTP/HTTPS protocols working. ++/// ++/// Note that almost all methods on this structure which configure various ++/// properties return a `Result`. This is largely used to detect whether the ++/// underlying implementation of libcurl actually implements the option being ++/// requested. 
If you're linked to a version of libcurl which doesn't support ++/// the option, then an error will be returned. Some options also perform some ++/// validation when they're set, and the error is returned through this vector. ++/// ++/// Note that historically this library contained an `Easy` handle so this one's ++/// called `Easy2`. The major difference between the `Easy` type is that an ++/// `Easy2` structure uses a trait instead of closures for all of the callbacks ++/// that curl can invoke. The `Easy` type is actually built on top of this ++/// `Easy` type, and this `Easy2` type can be more flexible in some situations ++/// due to the generic parameter. ++/// ++/// There's not necessarily a right answer for which type is correct to use, but ++/// as a general rule of thumb `Easy` is typically a reasonable choice for ++/// synchronous I/O and `Easy2` is a good choice for asynchronous I/O. ++/// ++/// # Examples ++/// ++/// ``` ++/// use curl::easy::{Easy2, Handler, WriteError}; ++/// ++/// struct Collector(Vec); ++/// ++/// impl Handler for Collector { ++/// fn write(&mut self, data: &[u8]) -> Result { ++/// self.0.extend_from_slice(data); ++/// Ok(data.len()) ++/// } ++/// } ++/// ++/// let mut easy = Easy2::new(Collector(Vec::new())); ++/// easy.get(true).unwrap(); ++/// easy.url("https://www.rust-lang.org/").unwrap(); ++/// easy.perform().unwrap(); ++/// ++/// assert_eq!(easy.response_code().unwrap(), 200); ++/// let contents = easy.get_ref(); ++/// println!("{}", String::from_utf8_lossy(&contents.0)); ++/// ``` ++pub struct Easy2 { ++ inner: Box>, ++} ++ ++struct Inner { ++ handle: *mut curl_sys::CURL, ++ header_list: Option, ++ resolve_list: Option, ++ form: Option
<Form>
, ++ error_buf: RefCell>, ++ handler: H, ++} ++ ++unsafe impl Send for Inner {} ++ ++/// Possible proxy types that libcurl currently understands. ++#[allow(missing_docs)] ++#[derive(Debug)] ++pub enum ProxyType { ++ Http = curl_sys::CURLPROXY_HTTP as isize, ++ Http1 = curl_sys::CURLPROXY_HTTP_1_0 as isize, ++ Socks4 = curl_sys::CURLPROXY_SOCKS4 as isize, ++ Socks5 = curl_sys::CURLPROXY_SOCKS5 as isize, ++ Socks4a = curl_sys::CURLPROXY_SOCKS4A as isize, ++ Socks5Hostname = curl_sys::CURLPROXY_SOCKS5_HOSTNAME as isize, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive, ++} ++ ++/// Possible conditions for the `time_condition` method. ++#[allow(missing_docs)] ++#[derive(Debug)] ++pub enum TimeCondition { ++ None = curl_sys::CURL_TIMECOND_NONE as isize, ++ IfModifiedSince = curl_sys::CURL_TIMECOND_IFMODSINCE as isize, ++ IfUnmodifiedSince = curl_sys::CURL_TIMECOND_IFUNMODSINCE as isize, ++ LastModified = curl_sys::CURL_TIMECOND_LASTMOD as isize, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive, ++} ++ ++/// Possible values to pass to the `ip_resolve` method. ++#[allow(missing_docs)] ++#[derive(Debug)] ++pub enum IpResolve { ++ V4 = curl_sys::CURL_IPRESOLVE_V4 as isize, ++ V6 = curl_sys::CURL_IPRESOLVE_V6 as isize, ++ Any = curl_sys::CURL_IPRESOLVE_WHATEVER as isize, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive = 500, ++} ++ ++/// Possible values to pass to the `http_version` method. ++#[derive(Debug)] ++pub enum HttpVersion { ++ /// We don't care what http version to use, and we'd like the library to ++ /// choose the best possible for us. ++ Any = curl_sys::CURL_HTTP_VERSION_NONE as isize, ++ ++ /// Please use HTTP 1.0 in the request ++ V10 = curl_sys::CURL_HTTP_VERSION_1_0 as isize, ++ ++ /// Please use HTTP 1.1 in the request ++ V11 = curl_sys::CURL_HTTP_VERSION_1_1 as isize, ++ ++ /// Please use HTTP 2 in the request ++ /// (Added in CURL 7.33.0) ++ V2 = curl_sys::CURL_HTTP_VERSION_2_0 as isize, ++ ++ /// Use version 2 for HTTPS, version 1.1 for HTTP ++ /// (Added in CURL 7.47.0) ++ V2TLS = curl_sys::CURL_HTTP_VERSION_2TLS as isize, ++ ++ /// Please use HTTP 2 without HTTP/1.1 Upgrade ++ /// (Added in CURL 7.49.0) ++ V2PriorKnowledge = curl_sys::CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE as isize, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive = 500, ++} ++ ++/// Possible values to pass to the `ip_resolve` method. ++#[allow(missing_docs)] ++#[derive(Debug)] ++pub enum SslVersion { ++ Default = curl_sys::CURL_SSLVERSION_DEFAULT as isize, ++ Tlsv1 = curl_sys::CURL_SSLVERSION_TLSv1 as isize, ++ Sslv2 = curl_sys::CURL_SSLVERSION_SSLv2 as isize, ++ Sslv3 = curl_sys::CURL_SSLVERSION_SSLv3 as isize, ++ // Tlsv10 = curl_sys::CURL_SSLVERSION_TLSv1_0 as isize, ++ // Tlsv11 = curl_sys::CURL_SSLVERSION_TLSv1_1 as isize, ++ // Tlsv12 = curl_sys::CURL_SSLVERSION_TLSv1_2 as isize, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive = 500, ++} ++ ++/// Possible return values from the `seek_function` callback. 
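++ ///
++ /// As a hedged illustration (not from the upstream docs), a seek callback
++ /// backed by a `File` might translate `std::io::Seek` results into these
++ /// variants like so:
++ ///
++ /// ```no_run
++ /// use std::fs::File;
++ /// use std::io::{Seek, SeekFrom};
++ /// use curl::easy::SeekResult;
++ ///
++ /// fn seek_in(file: &mut File, whence: SeekFrom) -> SeekResult {
++ ///     match file.seek(whence) {
++ ///         Ok(_) => SeekResult::Ok,
++ ///         // A failed seek on a real file is unrecoverable for this upload.
++ ///         Err(_) => SeekResult::Fail,
++ ///     }
++ /// }
++ /// ```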
++#[derive(Debug)] ++pub enum SeekResult { ++ /// Indicates that the seek operation was a success ++ Ok = curl_sys::CURL_SEEKFUNC_OK as isize, ++ ++ /// Indicates that the seek operation failed, and the entire request should ++ /// fail as a result. ++ Fail = curl_sys::CURL_SEEKFUNC_FAIL as isize, ++ ++ /// Indicates that although the seek failed libcurl should attempt to keep ++ /// working if possible (for example "seek" through reading). ++ CantSeek = curl_sys::CURL_SEEKFUNC_CANTSEEK as isize, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive = 500, ++} ++ ++/// Possible data chunks that can be witnessed as part of the `debug_function` ++/// callback. ++#[derive(Debug)] ++pub enum InfoType { ++ /// The data is informational text. ++ Text, ++ ++ /// The data is header (or header-like) data received from the peer. ++ HeaderIn, ++ ++ /// The data is header (or header-like) data sent to the peer. ++ HeaderOut, ++ ++ /// The data is protocol data received from the peer. ++ DataIn, ++ ++ /// The data is protocol data sent to the peer. ++ DataOut, ++ ++ /// The data is SSL/TLS (binary) data received from the peer. ++ SslDataIn, ++ ++ /// The data is SSL/TLS (binary) data sent to the peer. ++ SslDataOut, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive, ++} ++ ++/// Possible error codes that can be returned from the `read_function` callback. ++#[derive(Debug)] ++pub enum ReadError { ++ /// Indicates that the connection should be aborted immediately ++ Abort, ++ ++ /// Indicates that reading should be paused until `unpause` is called. ++ Pause, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive, ++} ++ ++/// Possible error codes that can be returned from the `write_function` callback. ++#[derive(Debug)] ++pub enum WriteError { ++ /// Indicates that reading should be paused until `unpause` is called. ++ Pause, ++ ++ /// Hidden variant to indicate that this enum should not be matched on, it ++ /// may grow over time. ++ #[doc(hidden)] ++ __Nonexhaustive, ++} ++ ++/// Options for `.netrc` parsing. ++#[derive(Debug)] ++pub enum NetRc { ++ /// Ignoring `.netrc` file and use information from url ++ /// ++ /// This option is default ++ Ignored = curl_sys::CURL_NETRC_IGNORED as isize, ++ ++ /// The use of your `~/.netrc` file is optional, and information in the URL is to be ++ /// preferred. The file will be scanned for the host and user name (to find the password only) ++ /// or for the host only, to find the first user name and password after that machine, which ++ /// ever information is not specified in the URL. ++ Optional = curl_sys::CURL_NETRC_OPTIONAL as isize, ++ ++ /// This value tells the library that use of the file is required, to ignore the information in ++ /// the URL, and to search the file for the host only. ++ Required = curl_sys::CURL_NETRC_REQUIRED as isize, ++} ++ ++/// Structure which stores possible authentication methods to get passed to ++/// `http_auth` and `proxy_auth`. ++#[derive(Clone)] ++pub struct Auth { ++ bits: c_long, ++} ++ ++/// Structure which stores possible ssl options to pass to `ssl_options`. ++#[derive(Clone)] ++pub struct SslOpt { ++ bits: c_long, ++} ++ ++impl Easy2 { ++ /// Creates a new "easy" handle which is the core of almost all operations ++ /// in libcurl. 
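++ ///
++ /// For instance, a minimal sketch (illustrative; the no-op handler below
++ /// relies entirely on the trait's default callbacks):
++ ///
++ /// ```no_run
++ /// use curl::easy::{Easy2, Handler};
++ ///
++ /// struct Quiet;
++ /// impl Handler for Quiet {}
++ ///
++ /// let mut easy = Easy2::new(Quiet);
++ /// easy.url("https://example.com/").unwrap();
++ /// easy.perform().unwrap();
++ /// ```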
++ /// ++ /// To use a handle, applications typically configure a number of options ++ /// followed by a call to `perform`. Options are preserved across calls to ++ /// `perform` and need to be reset manually (or via the `reset` method) if ++ /// this is not desired. ++ pub fn new(handler: H) -> Easy2 { ++ ::init(); ++ unsafe { ++ let handle = curl_sys::curl_easy_init(); ++ assert!(!handle.is_null()); ++ let mut ret = Easy2 { ++ inner: Box::new(Inner { ++ handle: handle, ++ header_list: None, ++ resolve_list: None, ++ form: None, ++ error_buf: RefCell::new(vec![0; curl_sys::CURL_ERROR_SIZE]), ++ handler: handler, ++ }), ++ }; ++ ret.default_configure(); ++ return ret ++ } ++ } ++ ++ /// Re-initializes this handle to the default values. ++ /// ++ /// This puts the handle to the same state as it was in when it was just ++ /// created. This does, however, keep live connections, the session id ++ /// cache, the dns cache, and cookies. ++ pub fn reset(&mut self) { ++ unsafe { ++ curl_sys::curl_easy_reset(self.inner.handle); ++ } ++ self.default_configure(); ++ } ++ ++ fn default_configure(&mut self) { ++ self.setopt_ptr(curl_sys::CURLOPT_ERRORBUFFER, ++ self.inner.error_buf.borrow().as_ptr() as *const _) ++ .expect("failed to set error buffer"); ++ let _ = self.signal(false); ++ self.ssl_configure(); ++ ++ let ptr = &*self.inner as *const _ as *const _; ++ ++ let cb: extern fn(*mut c_char, size_t, size_t, *mut c_void) -> size_t ++ = header_cb::; ++ self.setopt_ptr(curl_sys::CURLOPT_HEADERFUNCTION, cb as *const _) ++ .expect("failed to set header callback"); ++ self.setopt_ptr(curl_sys::CURLOPT_HEADERDATA, ptr) ++ .expect("failed to set header callback"); ++ ++ let cb: curl_sys::curl_write_callback = write_cb::; ++ self.setopt_ptr(curl_sys::CURLOPT_WRITEFUNCTION, cb as *const _) ++ .expect("failed to set write callback"); ++ self.setopt_ptr(curl_sys::CURLOPT_WRITEDATA, ptr) ++ .expect("failed to set write callback"); ++ ++ let cb: curl_sys::curl_read_callback = read_cb::; ++ self.setopt_ptr(curl_sys::CURLOPT_READFUNCTION, cb as *const _) ++ .expect("failed to set read callback"); ++ self.setopt_ptr(curl_sys::CURLOPT_READDATA, ptr) ++ .expect("failed to set read callback"); ++ ++ let cb: curl_sys::curl_seek_callback = seek_cb::; ++ self.setopt_ptr(curl_sys::CURLOPT_SEEKFUNCTION, cb as *const _) ++ .expect("failed to set seek callback"); ++ self.setopt_ptr(curl_sys::CURLOPT_SEEKDATA, ptr) ++ .expect("failed to set seek callback"); ++ ++ let cb: curl_sys::curl_progress_callback = progress_cb::; ++ self.setopt_ptr(curl_sys::CURLOPT_PROGRESSFUNCTION, cb as *const _) ++ .expect("failed to set progress callback"); ++ self.setopt_ptr(curl_sys::CURLOPT_PROGRESSDATA, ptr) ++ .expect("failed to set progress callback"); ++ ++ let cb: curl_sys::curl_debug_callback = debug_cb::; ++ self.setopt_ptr(curl_sys::CURLOPT_DEBUGFUNCTION, cb as *const _) ++ .expect("failed to set debug callback"); ++ self.setopt_ptr(curl_sys::CURLOPT_DEBUGDATA, ptr) ++ .expect("failed to set debug callback"); ++ ++ let cb: curl_sys::curl_ssl_ctx_callback = ssl_ctx_cb::; ++ drop(self.setopt_ptr(curl_sys::CURLOPT_SSL_CTX_FUNCTION, cb as *const _)); ++ drop(self.setopt_ptr(curl_sys::CURLOPT_SSL_CTX_DATA, ptr)); ++ ++ let cb: curl_sys::curl_opensocket_callback = opensocket_cb::; ++ self.setopt_ptr(curl_sys::CURLOPT_OPENSOCKETFUNCTION , cb as *const _) ++ .expect("failed to set open socket callback"); ++ self.setopt_ptr(curl_sys::CURLOPT_OPENSOCKETDATA, ptr) ++ .expect("failed to set open socket callback"); ++ } ++ ++ #[cfg(all(unix, 
not(target_os = "macos")))] ++ fn ssl_configure(&mut self) { ++ let probe = ::openssl_probe::probe(); ++ if let Some(ref path) = probe.cert_file { ++ let _ = self.cainfo(path); ++ } ++ if let Some(ref path) = probe.cert_dir { ++ let _ = self.capath(path); ++ } ++ } ++ ++ #[cfg(not(all(unix, not(target_os = "macos"))))] ++ fn ssl_configure(&mut self) {} ++} ++ ++impl Easy2 { ++ // ========================================================================= ++ // Behavior options ++ ++ /// Configures this handle to have verbose output to help debug protocol ++ /// information. ++ /// ++ /// By default output goes to stderr, but the `stderr` function on this type ++ /// can configure that. You can also use the `debug_function` method to get ++ /// all protocol data sent and received. ++ /// ++ /// By default, this option is `false`. ++ pub fn verbose(&mut self, verbose: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_VERBOSE, verbose as c_long) ++ } ++ ++ /// Indicates whether header information is streamed to the output body of ++ /// this request. ++ /// ++ /// This option is only relevant for protocols which have header metadata ++ /// (like http or ftp). It's not generally possible to extract headers ++ /// from the body if using this method, that use case should be intended for ++ /// the `header_function` method. ++ /// ++ /// To set HTTP headers, use the `http_header` method. ++ /// ++ /// By default, this option is `false` and corresponds to ++ /// `CURLOPT_HEADER`. ++ pub fn show_header(&mut self, show: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_HEADER, show as c_long) ++ } ++ ++ /// Indicates whether a progress meter will be shown for requests done with ++ /// this handle. ++ /// ++ /// This will also prevent the `progress_function` from being called. ++ /// ++ /// By default this option is `false` and corresponds to ++ /// `CURLOPT_NOPROGRESS`. ++ pub fn progress(&mut self, progress: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_NOPROGRESS, ++ (!progress) as c_long) ++ } ++ ++ /// Inform libcurl whether or not it should install signal handlers or ++ /// attempt to use signals to perform library functions. ++ /// ++ /// If this option is disabled then timeouts during name resolution will not ++ /// work unless libcurl is built against c-ares. Note that enabling this ++ /// option, however, may not cause libcurl to work with multiple threads. ++ /// ++ /// By default this option is `false` and corresponds to `CURLOPT_NOSIGNAL`. ++ /// Note that this default is **different than libcurl** as it is intended ++ /// that this library is threadsafe by default. See the [libcurl docs] for ++ /// some more information. ++ /// ++ /// [libcurl docs]: https://curl.haxx.se/libcurl/c/threadsafe.html ++ pub fn signal(&mut self, signal: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_NOSIGNAL, ++ (!signal) as c_long) ++ } ++ ++ /// Indicates whether multiple files will be transferred based on the file ++ /// name pattern. ++ /// ++ /// The last part of a filename uses fnmatch-like pattern matching. ++ /// ++ /// By default this option is `false` and corresponds to ++ /// `CURLOPT_WILDCARDMATCH`. ++ pub fn wildcard_match(&mut self, m: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_WILDCARDMATCH, m as c_long) ++ } ++ ++ /// Provides the unix domain socket which this handle will work with. 
++ /// ++ /// The string provided must be unix domain socket -encoded with the format: ++ /// ++ /// ```text ++ /// /path/file.sock ++ /// ``` ++ pub fn unix_socket(&mut self, unix_domain_socket: &str) -> Result<(), Error> { ++ let socket = try!(CString::new(unix_domain_socket)); ++ self.setopt_str(curl_sys::CURLOPT_UNIX_SOCKET_PATH, &socket) ++ } ++ ++ ++ // ========================================================================= ++ // Internal accessors ++ ++ /// Acquires a reference to the underlying handler for events. ++ pub fn get_ref(&self) -> &H { ++ &self.inner.handler ++ } ++ ++ /// Acquires a reference to the underlying handler for events. ++ pub fn get_mut(&mut self) -> &mut H { ++ &mut self.inner.handler ++ } ++ ++ // ========================================================================= ++ // Error options ++ ++ // TODO: error buffer and stderr ++ ++ /// Indicates whether this library will fail on HTTP response codes >= 400. ++ /// ++ /// This method is not fail-safe especially when authentication is involved. ++ /// ++ /// By default this option is `false` and corresponds to ++ /// `CURLOPT_FAILONERROR`. ++ pub fn fail_on_error(&mut self, fail: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_FAILONERROR, fail as c_long) ++ } ++ ++ // ========================================================================= ++ // Network options ++ ++ /// Provides the URL which this handle will work with. ++ /// ++ /// The string provided must be URL-encoded with the format: ++ /// ++ /// ```text ++ /// scheme://host:port/path ++ /// ``` ++ /// ++ /// The syntax is not validated as part of this function and that is ++ /// deferred until later. ++ /// ++ /// By default this option is not set and `perform` will not work until it ++ /// is set. This option corresponds to `CURLOPT_URL`. ++ pub fn url(&mut self, url: &str) -> Result<(), Error> { ++ let url = try!(CString::new(url)); ++ self.setopt_str(curl_sys::CURLOPT_URL, &url) ++ } ++ ++ /// Configures the port number to connect to, instead of the one specified ++ /// in the URL or the default of the protocol. ++ pub fn port(&mut self, port: u16) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_PORT, port as c_long) ++ } ++ ++ // /// Indicates whether sequences of `/../` and `/./` will be squashed or not. ++ // /// ++ // /// By default this option is `false` and corresponds to ++ // /// `CURLOPT_PATH_AS_IS`. ++ // pub fn path_as_is(&mut self, as_is: bool) -> Result<(), Error> { ++ // } ++ ++ /// Provide the URL of a proxy to use. ++ /// ++ /// By default this option is not set and corresponds to `CURLOPT_PROXY`. ++ pub fn proxy(&mut self, url: &str) -> Result<(), Error> { ++ let url = try!(CString::new(url)); ++ self.setopt_str(curl_sys::CURLOPT_PROXY, &url) ++ } ++ ++ /// Provide port number the proxy is listening on. ++ /// ++ /// By default this option is not set (the default port for the proxy ++ /// protocol is used) and corresponds to `CURLOPT_PROXYPORT`. ++ pub fn proxy_port(&mut self, port: u16) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_PROXYPORT, port as c_long) ++ } ++ ++ /// Indicates the type of proxy being used. ++ /// ++ /// By default this option is `ProxyType::Http` and corresponds to ++ /// `CURLOPT_PROXYTYPE`. ++ pub fn proxy_type(&mut self, kind: ProxyType) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_PROXYTYPE, kind as c_long) ++ } ++ ++ /// Provide a list of hosts that should not be proxied to. 
++ ///
++ /// This string is a comma-separated list of hosts which should not use the
++ /// proxy specified for connections. A single `*` character is also accepted
++ /// as a wildcard for all hosts.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_NOPROXY`.
++ pub fn noproxy(&mut self, skip: &str) -> Result<(), Error> {
++ let skip = try!(CString::new(skip));
++ self.setopt_str(curl_sys::CURLOPT_NOPROXY, &skip)
++ }
++
++ /// Inform curl whether it should tunnel all operations through the proxy.
++ ///
++ /// This essentially means that a `CONNECT` is sent to the proxy for all
++ /// outbound requests.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_HTTPPROXYTUNNEL`.
++ pub fn http_proxy_tunnel(&mut self, tunnel: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_HTTPPROXYTUNNEL,
++ tunnel as c_long)
++ }
++
++ /// Tell curl which interface to bind to for an outgoing network interface.
++ ///
++ /// The interface name, IP address, or host name can be specified here.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_INTERFACE`.
++ pub fn interface(&mut self, interface: &str) -> Result<(), Error> {
++ let s = try!(CString::new(interface));
++ self.setopt_str(curl_sys::CURLOPT_INTERFACE, &s)
++ }
++
++ /// Indicate which port should be bound to locally for this connection.
++ ///
++ /// By default this option is 0 (any port) and corresponds to
++ /// `CURLOPT_LOCALPORT`.
++ pub fn set_local_port(&mut self, port: u16) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_LOCALPORT, port as c_long)
++ }
++
++ /// Indicates the number of attempts libcurl will perform to find a working
++ /// port number.
++ ///
++ /// By default this option is 1 and corresponds to
++ /// `CURLOPT_LOCALPORTRANGE`.
++ pub fn local_port_range(&mut self, range: u16) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_LOCALPORTRANGE,
++ range as c_long)
++ }
++
++ /// Sets the timeout of how long name resolves will be kept in memory.
++ ///
++ /// This is distinct from DNS TTL options and is entirely speculative.
++ ///
++ /// By default this option is 60s and corresponds to
++ /// `CURLOPT_DNS_CACHE_TIMEOUT`.
++ pub fn dns_cache_timeout(&mut self, dur: Duration) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_DNS_CACHE_TIMEOUT,
++ dur.as_secs() as c_long)
++ }
++
++ /// Specify the preferred receive buffer size, in bytes.
++ ///
++ /// This is treated as a request, not an order, and the main point of this
++ /// is that the write callback may get called more often with smaller
++ /// chunks.
++ ///
++ /// By default this option is the maximum write size and corresponds to
++ /// `CURLOPT_BUFFERSIZE`.
++ pub fn buffer_size(&mut self, size: usize) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_BUFFERSIZE, size as c_long)
++ }
++
++ // /// Enable or disable TCP Fast Open
++ // ///
++ // /// By default this option defaults to `false` and corresponds to
++ // /// `CURLOPT_TCP_FASTOPEN`
++ // pub fn fast_open(&mut self, enable: bool) -> Result<(), Error> {
++ // }
++
++ /// Configures whether the TCP_NODELAY option is set, or Nagle's algorithm
++ /// is disabled.
++ ///
++ /// The purpose of Nagle's algorithm is to minimize the number of small
++ /// packets on the network, and disabling this may be less efficient in
++ /// some situations.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_TCP_NODELAY`.
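++ ///
++ /// # Examples
++ ///
++ /// A minimal sketch of turning Nagle's algorithm off for a
++ /// latency-sensitive transfer (the URL is only a placeholder):
++ ///
++ /// ```
++ /// use curl::easy::Easy;
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.tcp_nodelay(true).unwrap();
++ /// ```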
++ pub fn tcp_nodelay(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_TCP_NODELAY, enable as c_long)
++ }
++
++ /// Configures whether TCP keepalive probes will be sent.
++ ///
++ /// The delay and frequency of these probes is controlled by `tcp_keepidle`
++ /// and `tcp_keepintvl`.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_TCP_KEEPALIVE`.
++ pub fn tcp_keepalive(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_TCP_KEEPALIVE, enable as c_long)
++ }
++
++ /// Configures the TCP keepalive idle time wait.
++ ///
++ /// This is the amount of time the connection must be idle before keepalive
++ /// probes will be sent. Not all operating systems support this.
++ ///
++ /// By default this corresponds to `CURLOPT_TCP_KEEPIDLE`.
++ pub fn tcp_keepidle(&mut self, amt: Duration) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_TCP_KEEPIDLE,
++ amt.as_secs() as c_long)
++ }
++
++ /// Configures the delay between keepalive probes.
++ ///
++ /// By default this corresponds to `CURLOPT_TCP_KEEPINTVL`.
++ pub fn tcp_keepintvl(&mut self, amt: Duration) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_TCP_KEEPINTVL,
++ amt.as_secs() as c_long)
++ }
++
++ /// Configures the scope for local IPv6 addresses.
++ ///
++ /// Sets the scope_id value to use when connecting to IPv6 or link-local
++ /// addresses.
++ ///
++ /// By default this value is 0 and corresponds to `CURLOPT_ADDRESS_SCOPE`.
++ pub fn address_scope(&mut self, scope: u32) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_ADDRESS_SCOPE,
++ scope as c_long)
++ }
++
++ // =========================================================================
++ // Names and passwords
++
++ /// Configures the username to pass as authentication for this connection.
++ ///
++ /// By default this value is not set and corresponds to `CURLOPT_USERNAME`.
++ pub fn username(&mut self, user: &str) -> Result<(), Error> {
++ let user = try!(CString::new(user));
++ self.setopt_str(curl_sys::CURLOPT_USERNAME, &user)
++ }
++
++ /// Configures the password to pass as authentication for this connection.
++ ///
++ /// By default this value is not set and corresponds to `CURLOPT_PASSWORD`.
++ pub fn password(&mut self, pass: &str) -> Result<(), Error> {
++ let pass = try!(CString::new(pass));
++ self.setopt_str(curl_sys::CURLOPT_PASSWORD, &pass)
++ }
++
++ /// Set HTTP server authentication methods to try.
++ ///
++ /// If more than one method is set, libcurl will first query the site to see
++ /// which authentication methods it supports and then pick the best one you
++ /// allow it to use. For some methods, this will induce an extra network
++ /// round-trip. Set the actual name and password with the `password` and
++ /// `username` methods.
++ ///
++ /// For authentication with a proxy, see `proxy_auth`.
++ ///
++ /// By default this value is basic and corresponds to `CURLOPT_HTTPAUTH`.
++ pub fn http_auth(&mut self, auth: &Auth) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_HTTPAUTH, auth.bits)
++ }
++
++ /// Configures the proxy username to pass as authentication for this
++ /// connection.
++ ///
++ /// By default this value is not set and corresponds to
++ /// `CURLOPT_PROXYUSERNAME`.
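++ ///
++ /// # Examples
++ ///
++ /// A short sketch of routing a request through an authenticated proxy;
++ /// the proxy URL and the credentials below are placeholders:
++ ///
++ /// ```
++ /// use curl::easy::Easy;
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.proxy("http://proxy.example.com:8080").unwrap();
++ /// handle.proxy_username("foo").unwrap();
++ /// handle.proxy_password("bar").unwrap();
++ /// ```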
++ pub fn proxy_username(&mut self, user: &str) -> Result<(), Error> {
++ let user = try!(CString::new(user));
++ self.setopt_str(curl_sys::CURLOPT_PROXYUSERNAME, &user)
++ }
++
++ /// Configures the proxy password to pass as authentication for this
++ /// connection.
++ ///
++ /// By default this value is not set and corresponds to
++ /// `CURLOPT_PROXYPASSWORD`.
++ pub fn proxy_password(&mut self, pass: &str) -> Result<(), Error> {
++ let pass = try!(CString::new(pass));
++ self.setopt_str(curl_sys::CURLOPT_PROXYPASSWORD, &pass)
++ }
++
++ /// Set HTTP proxy authentication methods to try.
++ ///
++ /// If more than one method is set, libcurl will first query the site to see
++ /// which authentication methods it supports and then pick the best one you
++ /// allow it to use. For some methods, this will induce an extra network
++ /// round-trip. Set the actual name and password with the `proxy_password`
++ /// and `proxy_username` methods.
++ ///
++ /// By default this value is basic and corresponds to `CURLOPT_PROXYAUTH`.
++ pub fn proxy_auth(&mut self, auth: &Auth) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_PROXYAUTH, auth.bits)
++ }
++
++ /// Enable `.netrc` parsing.
++ ///
++ /// By default the `.netrc` file is ignored (`CURL_NETRC_IGNORED`). This
++ /// option corresponds to `CURLOPT_NETRC`.
++ pub fn netrc(&mut self, netrc: NetRc) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_NETRC, netrc as c_long)
++ }
++
++ // =========================================================================
++ // HTTP Options
++
++ /// Indicates whether the referer header is automatically updated.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_AUTOREFERER`.
++ pub fn autoreferer(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_AUTOREFERER, enable as c_long)
++ }
++
++ /// Enables automatic decompression of HTTP downloads.
++ ///
++ /// Sets the contents of the Accept-Encoding header sent in an HTTP request.
++ /// This enables decoding of a response with Content-Encoding.
++ ///
++ /// Currently supported encodings are `identity`, `zlib`, and `gzip`. A
++ /// zero-length string passed in will send all accepted encodings.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_ACCEPT_ENCODING`.
++ pub fn accept_encoding(&mut self, encoding: &str) -> Result<(), Error> {
++ let encoding = try!(CString::new(encoding));
++ self.setopt_str(curl_sys::CURLOPT_ACCEPT_ENCODING, &encoding)
++ }
++
++ /// Request the HTTP Transfer Encoding.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_TRANSFER_ENCODING`.
++ pub fn transfer_encoding(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_TRANSFER_ENCODING, enable as c_long)
++ }
++
++ /// Follow HTTP 3xx redirects.
++ ///
++ /// Indicates whether any `Location` headers in the response should get
++ /// followed.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_FOLLOWLOCATION`.
++ pub fn follow_location(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_FOLLOWLOCATION, enable as c_long)
++ }
++
++ /// Send credentials to hosts other than the first as well.
++ ///
++ /// Sends username/password credentials even when the host changes as part
++ /// of a redirect.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_UNRESTRICTED_AUTH`.
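++ ///
++ /// # Examples
++ ///
++ /// A sketch of following redirects while re-sending the configured
++ /// credentials to every host along the way (use with care; the URL and
++ /// credentials are placeholders):
++ ///
++ /// ```
++ /// use curl::easy::Easy;
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.username("foo").unwrap();
++ /// handle.password("bar").unwrap();
++ /// handle.follow_location(true).unwrap();
++ /// handle.unrestricted_auth(true).unwrap();
++ /// ```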
++ pub fn unrestricted_auth(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_UNRESTRICTED_AUTH, enable as c_long)
++ }
++
++ /// Set the maximum number of redirects allowed.
++ ///
++ /// A value of 0 will refuse any redirect.
++ ///
++ /// By default this option is `-1` (unlimited) and corresponds to
++ /// `CURLOPT_MAXREDIRS`.
++ pub fn max_redirections(&mut self, max: u32) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_MAXREDIRS, max as c_long)
++ }
++
++ // TODO: post_redirections
++
++ /// Make an HTTP PUT request.
++ ///
++ /// By default this option is `false` and corresponds to `CURLOPT_PUT`.
++ pub fn put(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_PUT, enable as c_long)
++ }
++
++ /// Make an HTTP POST request.
++ ///
++ /// This will also make the library use the
++ /// `Content-Type: application/x-www-form-urlencoded` header.
++ ///
++ /// POST data can be specified through `post_fields` or by specifying a read
++ /// function.
++ ///
++ /// By default this option is `false` and corresponds to `CURLOPT_POST`.
++ pub fn post(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_POST, enable as c_long)
++ }
++
++ /// Configures the data that will be uploaded as part of a POST.
++ ///
++ /// Note that the data is copied into this handle and if that's not desired
++ /// then the read callbacks can be used instead.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_COPYPOSTFIELDS`.
++ pub fn post_fields_copy(&mut self, data: &[u8]) -> Result<(), Error> {
++ // Set the length before the pointer so libcurl knows how much to read
++ try!(self.post_field_size(data.len() as u64));
++ self.setopt_ptr(curl_sys::CURLOPT_COPYPOSTFIELDS,
++ data.as_ptr() as *const _)
++ }
++
++ /// Configures the size of data that's going to be uploaded as part of a
++ /// POST operation.
++ ///
++ /// This is called automatically as part of `post_fields` and should only
++ /// be called if data is being provided in a read callback (and even then
++ /// it's optional).
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_POSTFIELDSIZE_LARGE`.
++ pub fn post_field_size(&mut self, size: u64) -> Result<(), Error> {
++ // Clear anything previous to ensure we don't read past a buffer
++ try!(self.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS, 0 as *const _));
++ self.setopt_off_t(curl_sys::CURLOPT_POSTFIELDSIZE_LARGE,
++ size as curl_sys::curl_off_t)
++ }
++
++ /// Tells libcurl you want a multipart/formdata HTTP POST to be made, with
++ /// the data to pass on to the server given in the `form` argument.
++ ///
++ /// By default this option is set to null and corresponds to
++ /// `CURLOPT_HTTPPOST`.
++ pub fn httppost(&mut self, form: Form) -> Result<(), Error> {
++ try!(self.setopt_ptr(curl_sys::CURLOPT_HTTPPOST,
++ form::raw(&form) as *const _));
++ self.inner.form = Some(form);
++ Ok(())
++ }
++
++ /// Sets the HTTP referer header.
++ ///
++ /// By default this option is not set and corresponds to `CURLOPT_REFERER`.
++ pub fn referer(&mut self, referer: &str) -> Result<(), Error> {
++ let referer = try!(CString::new(referer));
++ self.setopt_str(curl_sys::CURLOPT_REFERER, &referer)
++ }
++
++ /// Sets the HTTP user-agent header.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_USERAGENT`.
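++ ///
++ /// # Examples
++ ///
++ /// A small sketch of identifying the client with a custom user-agent
++ /// string (the name and URL are placeholders):
++ ///
++ /// ```
++ /// use curl::easy::Easy;
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.useragent("my-client/0.1").unwrap();
++ /// ```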
++ pub fn useragent(&mut self, useragent: &str) -> Result<(), Error> {
++ let useragent = try!(CString::new(useragent));
++ self.setopt_str(curl_sys::CURLOPT_USERAGENT, &useragent)
++ }
++
++ /// Add some headers to this HTTP request.
++ ///
++ /// If you add a header that is otherwise used internally, the value here
++ /// takes precedence. If a header is added with no content (like `Accept:`)
++ /// then the header will be disabled internally. To add a header with no
++ /// content, use the form `MyHeader;` (note the trailing semicolon).
++ ///
++ /// Headers must not be CRLF terminated. Many replaced headers have common
++ /// shortcuts which should be preferred.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_HTTPHEADER`
++ ///
++ /// # Examples
++ ///
++ /// ```
++ /// use curl::easy::{Easy, List};
++ ///
++ /// let mut list = List::new();
++ /// list.append("Foo: bar").unwrap();
++ /// list.append("Bar: baz").unwrap();
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.http_headers(list).unwrap();
++ /// handle.perform().unwrap();
++ /// ```
++ pub fn http_headers(&mut self, list: List) -> Result<(), Error> {
++ let ptr = list::raw(&list);
++ self.inner.header_list = Some(list);
++ self.setopt_ptr(curl_sys::CURLOPT_HTTPHEADER, ptr as *const _)
++ }
++
++ // /// Add some headers to send to the HTTP proxy.
++ // ///
++ // /// This function is essentially the same as `http_headers`.
++ // ///
++ // /// By default this option is not set and corresponds to
++ // /// `CURLOPT_PROXYHEADER`
++ // pub fn proxy_headers(&mut self, list: &'a List) -> Result<(), Error> {
++ // self.setopt_ptr(curl_sys::CURLOPT_PROXYHEADER, list.raw as *const _)
++ // }
++
++ /// Set the contents of the HTTP Cookie header.
++ ///
++ /// Pass a string of the form `name=contents` for one cookie value or
++ /// `name1=val1; name2=val2` for multiple values.
++ ///
++ /// Using this option multiple times will only make the latest string
++ /// override the previous ones. This option will not enable the cookie
++ /// engine; use `cookie_file` or `cookie_jar` to do that.
++ ///
++ /// By default this option is not set and corresponds to `CURLOPT_COOKIE`.
++ pub fn cookie(&mut self, cookie: &str) -> Result<(), Error> {
++ let cookie = try!(CString::new(cookie));
++ self.setopt_str(curl_sys::CURLOPT_COOKIE, &cookie)
++ }
++
++ /// Set the file name to read cookies from.
++ ///
++ /// The cookie data can be in either the old Netscape / Mozilla cookie data
++ /// format or just regular HTTP headers (Set-Cookie style) dumped to a file.
++ ///
++ /// This also enables the cookie engine, making libcurl parse and send
++ /// cookies on subsequent requests with this handle.
++ ///
++ /// Given an empty or non-existing file or by passing the empty string ("")
++ /// to this option, you can enable the cookie engine without reading any
++ /// initial cookies.
++ ///
++ /// If you use this option multiple times, you just add more files to read.
++ /// Subsequent files will add more cookies.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_COOKIEFILE`.
++ pub fn cookie_file<P: AsRef<Path>>(&mut self, file: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_COOKIEFILE, file.as_ref())
++ }
++
++ /// Set the file name to store cookies to.
++ ///
++ /// This will make libcurl write all internally known cookies to the file
++ /// when this handle is dropped.
++ /// If no cookies are known, no file will be created. Specify "-" as
++ /// filename to instead have the cookies written to stdout. Using this
++ /// option also enables cookies for this session, so if you for example
++ /// follow a location it will make matching cookies get sent accordingly.
++ ///
++ /// Note that libcurl doesn't read any cookies from the cookie jar. If you
++ /// want to read cookies from a file, use `cookie_file`.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_COOKIEJAR`.
++ pub fn cookie_jar<P: AsRef<Path>>(&mut self, file: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_COOKIEJAR, file.as_ref())
++ }
++
++ /// Start a new cookie session
++ ///
++ /// Marks this as a new cookie "session". It will force libcurl to ignore
++ /// all cookies it is about to load that are "session cookies" from the
++ /// previous session. By default, libcurl always stores and loads all
++ /// cookies, independent of whether they are session cookies or not.
++ /// Session cookies are cookies without an expiry date and they are meant
++ /// to be alive and existing for this "session" only.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_COOKIESESSION`.
++ pub fn cookie_session(&mut self, session: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_COOKIESESSION, session as c_long)
++ }
++
++ /// Add to or manipulate cookies held in memory.
++ ///
++ /// Such a cookie can be either a single line in Netscape / Mozilla format
++ /// or just regular HTTP-style header (Set-Cookie: ...) format. This will
++ /// also enable the cookie engine. This adds that single cookie to the
++ /// internal cookie store.
++ ///
++ /// Exercise caution if you are using this option and multiple transfers may
++ /// occur. If you use the Set-Cookie format and don't specify a domain then
++ /// the cookie is sent for any domain (even after redirects are followed)
++ /// and cannot be modified by a server-set cookie. If a server sets a cookie
++ /// of the same name (or maybe you've imported one) then both will be sent
++ /// on a future transfer to that server, likely not what you intended. To
++ /// address these issues set a domain in Set-Cookie or use the Netscape
++ /// format.
++ ///
++ /// Additionally, there are commands available that perform actions if you
++ /// pass in these exact strings:
++ ///
++ /// * "ALL" - erases all cookies held in memory
++ /// * "SESS" - erases all session cookies held in memory
++ /// * "FLUSH" - writes all known cookies to the specified cookie jar
++ /// * "RELOAD" - rereads all cookies from the cookie file
++ ///
++ /// By default this option corresponds to `CURLOPT_COOKIELIST`
++ pub fn cookie_list(&mut self, cookie: &str) -> Result<(), Error> {
++ let cookie = try!(CString::new(cookie));
++ self.setopt_str(curl_sys::CURLOPT_COOKIELIST, &cookie)
++ }
++
++ /// Ask for an HTTP GET request.
++ ///
++ /// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`.
++ pub fn get(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long)
++ }
++
++ // /// Set the preferred HTTP version.
++ // ///
++ // /// By default this option is not set and corresponds to
++ // /// `CURLOPT_HTTP_VERSION`.
++ // pub fn http_version(&mut self, vers: &str) -> Result<(), Error> {
++ // self.setopt_long(curl_sys::CURLOPT_HTTP_VERSION, vers as c_long)
++ // }
++
++ /// Ignore the content-length header.
++ /// ++ /// By default this option is `false` and corresponds to ++ /// `CURLOPT_IGNORE_CONTENT_LENGTH`. ++ pub fn ignore_content_length(&mut self, ignore: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_IGNORE_CONTENT_LENGTH, ++ ignore as c_long) ++ } ++ ++ /// Enable or disable HTTP content decoding. ++ /// ++ /// By default this option is `true` and corresponds to ++ /// `CURLOPT_HTTP_CONTENT_DECODING`. ++ pub fn http_content_decoding(&mut self, enable: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_HTTP_CONTENT_DECODING, ++ enable as c_long) ++ } ++ ++ /// Enable or disable HTTP transfer decoding. ++ /// ++ /// By default this option is `true` and corresponds to ++ /// `CURLOPT_HTTP_TRANSFER_DECODING`. ++ pub fn http_transfer_decoding(&mut self, enable: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING, ++ enable as c_long) ++ } ++ ++ // /// Timeout for the Expect: 100-continue response ++ // /// ++ // /// By default this option is 1s and corresponds to ++ // /// `CURLOPT_EXPECT_100_TIMEOUT_MS`. ++ // pub fn expect_100_timeout(&mut self, enable: bool) -> Result<(), Error> { ++ // self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING, ++ // enable as c_long) ++ // } ++ ++ // /// Wait for pipelining/multiplexing. ++ // /// ++ // /// Tells libcurl to prefer to wait for a connection to confirm or deny that ++ // /// it can do pipelining or multiplexing before continuing. ++ // /// ++ // /// When about to perform a new transfer that allows pipelining or ++ // /// multiplexing, libcurl will check for existing connections to re-use and ++ // /// pipeline on. If no such connection exists it will immediately continue ++ // /// and create a fresh new connection to use. ++ // /// ++ // /// By setting this option to `true` - having `pipeline` enabled for the ++ // /// multi handle this transfer is associated with - libcurl will instead ++ // /// wait for the connection to reveal if it is possible to ++ // /// pipeline/multiplex on before it continues. This enables libcurl to much ++ // /// better keep the number of connections to a minimum when using pipelining ++ // /// or multiplexing protocols. ++ // /// ++ // /// The effect thus becomes that with this option set, libcurl prefers to ++ // /// wait and re-use an existing connection for pipelining rather than the ++ // /// opposite: prefer to open a new connection rather than waiting. ++ // /// ++ // /// The waiting time is as long as it takes for the connection to get up and ++ // /// for libcurl to get the necessary response back that informs it about its ++ // /// protocol and support level. ++ // pub fn http_pipewait(&mut self, enable: bool) -> Result<(), Error> { ++ // } ++ ++ ++ // ========================================================================= ++ // Protocol Options ++ ++ /// Indicates the range that this request should retrieve. ++ /// ++ /// The string provided should be of the form `N-M` where either `N` or `M` ++ /// can be left out. For HTTP transfers multiple ranges separated by commas ++ /// are also accepted. ++ /// ++ /// By default this option is not set and corresponds to `CURLOPT_RANGE`. ++ pub fn range(&mut self, range: &str) -> Result<(), Error> { ++ let range = try!(CString::new(range)); ++ self.setopt_str(curl_sys::CURLOPT_RANGE, &range) ++ } ++ ++ /// Set a point to resume transfer from ++ /// ++ /// Specify the offset in bytes you want the transfer to start from. 
++ /// ++ /// By default this option is 0 and corresponds to ++ /// `CURLOPT_RESUME_FROM_LARGE`. ++ pub fn resume_from(&mut self, from: u64) -> Result<(), Error> { ++ self.setopt_off_t(curl_sys::CURLOPT_RESUME_FROM_LARGE, ++ from as curl_sys::curl_off_t) ++ } ++ ++ /// Set a custom request string ++ /// ++ /// Specifies that a custom request will be made (e.g. a custom HTTP ++ /// method). This does not change how libcurl performs internally, just ++ /// changes the string sent to the server. ++ /// ++ /// By default this option is not set and corresponds to ++ /// `CURLOPT_CUSTOMREQUEST`. ++ pub fn custom_request(&mut self, request: &str) -> Result<(), Error> { ++ let request = try!(CString::new(request)); ++ self.setopt_str(curl_sys::CURLOPT_CUSTOMREQUEST, &request) ++ } ++ ++ /// Get the modification time of the remote resource ++ /// ++ /// If true, libcurl will attempt to get the modification time of the ++ /// remote document in this operation. This requires that the remote server ++ /// sends the time or replies to a time querying command. The `filetime` ++ /// function can be used after a transfer to extract the received time (if ++ /// any). ++ /// ++ /// By default this option is `false` and corresponds to `CURLOPT_FILETIME` ++ pub fn fetch_filetime(&mut self, fetch: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_FILETIME, fetch as c_long) ++ } ++ ++ /// Indicate whether to download the request without getting the body ++ /// ++ /// This is useful, for example, for doing a HEAD request. ++ /// ++ /// By default this option is `false` and corresponds to `CURLOPT_NOBODY`. ++ pub fn nobody(&mut self, enable: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_NOBODY, enable as c_long) ++ } ++ ++ /// Set the size of the input file to send off. ++ /// ++ /// By default this option is not set and corresponds to ++ /// `CURLOPT_INFILESIZE_LARGE`. ++ pub fn in_filesize(&mut self, size: u64) -> Result<(), Error> { ++ self.setopt_off_t(curl_sys::CURLOPT_INFILESIZE_LARGE, ++ size as curl_sys::curl_off_t) ++ } ++ ++ /// Enable or disable data upload. ++ /// ++ /// This means that a PUT request will be made for HTTP and probably wants ++ /// to be combined with the read callback as well as the `in_filesize` ++ /// method. ++ /// ++ /// By default this option is `false` and corresponds to `CURLOPT_UPLOAD`. ++ pub fn upload(&mut self, enable: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_UPLOAD, enable as c_long) ++ } ++ ++ /// Configure the maximum file size to download. ++ /// ++ /// By default this option is not set and corresponds to ++ /// `CURLOPT_MAXFILESIZE_LARGE`. ++ pub fn max_filesize(&mut self, size: u64) -> Result<(), Error> { ++ self.setopt_off_t(curl_sys::CURLOPT_MAXFILESIZE_LARGE, ++ size as curl_sys::curl_off_t) ++ } ++ ++ /// Selects a condition for a time request. ++ /// ++ /// This value indicates how the `time_value` option is interpreted. ++ /// ++ /// By default this option is not set and corresponds to ++ /// `CURLOPT_TIMECONDITION`. ++ pub fn time_condition(&mut self, cond: TimeCondition) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_TIMECONDITION, cond as c_long) ++ } ++ ++ /// Sets the time value for a conditional request. ++ /// ++ /// The value here should be the number of seconds elapsed since January 1, ++ /// 1970. To pass how to interpret this value, use `time_condition`. ++ /// ++ /// By default this option is not set and corresponds to ++ /// `CURLOPT_TIMEVALUE`. 
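++ ///
++ /// # Examples
++ ///
++ /// A sketch of a conditional request that only transfers the body if the
++ /// resource changed after the given timestamp (the URL and timestamp are
++ /// placeholders):
++ ///
++ /// ```
++ /// use curl::easy::{Easy, TimeCondition};
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.time_condition(TimeCondition::IfModifiedSince).unwrap();
++ /// handle.time_value(1_483_228_800).unwrap(); // 2017-01-01T00:00:00Z
++ /// ```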
++ pub fn time_value(&mut self, val: i64) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_TIMEVALUE, val as c_long)
++ }
++
++ // =========================================================================
++ // Connection Options
++
++ /// Set maximum time the request is allowed to take.
++ ///
++ /// Normally, name lookups can take a considerable time and limiting
++ /// operations to less than a few minutes risks aborting perfectly normal
++ /// operations.
++ ///
++ /// If libcurl is built to use the standard system name resolver, that
++ /// portion of the transfer will still use full-second resolution for
++ /// timeouts with a minimum timeout allowed of one second.
++ ///
++ /// In unix-like systems, this might cause signals to be used unless
++ /// `nosignal` is set.
++ ///
++ /// Since this puts a hard limit on how long a request is allowed to
++ /// take, it has limited use in dynamic use cases with varying transfer
++ /// times. You are then advised to explore `low_speed_limit`,
++ /// `low_speed_time` or using `progress_function` to implement your own
++ /// timeout logic.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_TIMEOUT_MS`.
++ pub fn timeout(&mut self, timeout: Duration) -> Result<(), Error> {
++ // TODO: checked arithmetic and casts
++ // TODO: use CURLOPT_TIMEOUT if the timeout is too great
++ let ms = timeout.as_secs() * 1000 +
++ (timeout.subsec_nanos() / 1_000_000) as u64;
++ self.setopt_long(curl_sys::CURLOPT_TIMEOUT_MS, ms as c_long)
++ }
++
++ /// Set the low speed limit in bytes per second.
++ ///
++ /// This specifies the average transfer speed in bytes per second that the
++ /// transfer should be below during `low_speed_time` for libcurl to consider
++ /// it to be too slow and abort.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_LOW_SPEED_LIMIT`.
++ pub fn low_speed_limit(&mut self, limit: u32) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_LIMIT, limit as c_long)
++ }
++
++ /// Set the low speed time period.
++ ///
++ /// Specifies the window of time during which, if the transfer rate stays
++ /// below `low_speed_limit`, the request will be aborted.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_LOW_SPEED_TIME`.
++ pub fn low_speed_time(&mut self, dur: Duration) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_TIME,
++ dur.as_secs() as c_long)
++ }
++
++ /// Rate limit data upload speed
++ ///
++ /// If an upload exceeds this speed (counted in bytes per second) on
++ /// cumulative average during the transfer, the transfer will pause to keep
++ /// the average rate less than or equal to the parameter value.
++ ///
++ /// By default this option is not set (unlimited speed) and corresponds to
++ /// `CURLOPT_MAX_SEND_SPEED_LARGE`.
++ pub fn max_send_speed(&mut self, speed: u64) -> Result<(), Error> {
++ self.setopt_off_t(curl_sys::CURLOPT_MAX_SEND_SPEED_LARGE,
++ speed as curl_sys::curl_off_t)
++ }
++
++ /// Rate limit data download speed
++ ///
++ /// If a download exceeds this speed (counted in bytes per second) on
++ /// cumulative average during the transfer, the transfer will pause to keep
++ /// the average rate less than or equal to the parameter value.
++ ///
++ /// By default this option is not set (unlimited speed) and corresponds to
++ /// `CURLOPT_MAX_RECV_SPEED_LARGE`.
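++ ///
++ /// # Examples
++ ///
++ /// A small sketch of capping the download rate at roughly 1 MiB per
++ /// second (the URL is a placeholder):
++ ///
++ /// ```
++ /// use curl::easy::Easy;
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.max_recv_speed(1024 * 1024).unwrap();
++ /// ```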
++ pub fn max_recv_speed(&mut self, speed: u64) -> Result<(), Error> {
++ self.setopt_off_t(curl_sys::CURLOPT_MAX_RECV_SPEED_LARGE,
++ speed as curl_sys::curl_off_t)
++ }
++
++ /// Set the maximum connection cache size.
++ ///
++ /// The set amount will be the maximum number of simultaneously open
++ /// persistent connections that libcurl may cache in the pool associated
++ /// with this handle. The default is 5, and there isn't much point in
++ /// changing this value unless you are perfectly aware of how this works and
++ /// changes libcurl's behaviour. This concerns connections using any of the
++ /// protocols that support persistent connections.
++ ///
++ /// When reaching the maximum limit, curl closes the oldest one in the cache
++ /// to prevent increasing the number of open connections.
++ ///
++ /// By default this option is set to 5 and corresponds to
++ /// `CURLOPT_MAXCONNECTS`
++ pub fn max_connects(&mut self, max: u32) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_MAXCONNECTS, max as c_long)
++ }
++
++ /// Force a new connection to be used.
++ ///
++ /// Makes the next transfer use a new (fresh) connection by force instead of
++ /// trying to re-use an existing one. This option should be used with
++ /// caution and only if you understand what it does as it may seriously
++ /// impact performance.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_FRESH_CONNECT`.
++ pub fn fresh_connect(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_FRESH_CONNECT, enable as c_long)
++ }
++
++ /// Make connection get closed at once after use.
++ ///
++ /// Makes libcurl explicitly close the connection when done with the
++ /// transfer. Normally, libcurl keeps all connections alive when done with
++ /// one transfer in case a succeeding one follows that can re-use them.
++ /// This option should be used with caution and only if you understand what
++ /// it does as it can seriously impact performance.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_FORBID_REUSE`.
++ pub fn forbid_reuse(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_FORBID_REUSE, enable as c_long)
++ }
++
++ /// Timeout for the connect phase
++ ///
++ /// This is the maximum time that you allow the connection phase to the
++ /// server to take. This only limits the connection phase; it has no impact
++ /// once it has connected.
++ ///
++ /// By default this value is 300 seconds and corresponds to
++ /// `CURLOPT_CONNECTTIMEOUT_MS`.
++ pub fn connect_timeout(&mut self, timeout: Duration) -> Result<(), Error> {
++ let ms = timeout.as_secs() * 1000 +
++ (timeout.subsec_nanos() / 1_000_000) as u64;
++ self.setopt_long(curl_sys::CURLOPT_CONNECTTIMEOUT_MS, ms as c_long)
++ }
++
++ /// Specify which IP protocol version to use
++ ///
++ /// Allows an application to select what kind of IP addresses to use when
++ /// resolving host names. This is only interesting when using host names
++ /// that resolve addresses using more than one version of IP.
++ ///
++ /// By default this value is "any" and corresponds to `CURLOPT_IPRESOLVE`.
++ pub fn ip_resolve(&mut self, resolve: IpResolve) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_IPRESOLVE, resolve as c_long)
++ }
++
++ /// Specify custom host name to IP address resolves.
++ ///
++ /// Allows specifying hostname to IP mappings to use before trying the
++ /// system resolver.
++ /// ++ /// # Examples ++ /// ``` ++ /// use curl::easy::{Easy, List}; ++ /// ++ /// let mut list = List::new(); ++ /// list.append("www.rust-lang.org:443:185.199.108.153").unwrap(); ++ /// ++ /// let mut handle = Easy::new(); ++ /// handle.url("https://www.rust-lang.org/").unwrap(); ++ /// handle.resolve(list).unwrap(); ++ /// handle.perform().unwrap(); ++ /// ``` ++ pub fn resolve(&mut self, list: List) -> Result<(), Error> { ++ let ptr = list::raw(&list); ++ self.inner.resolve_list = Some(list); ++ self.setopt_ptr(curl_sys::CURLOPT_RESOLVE, ptr as *const _) ++ } ++ ++ ++ /// Configure whether to stop when connected to target server ++ /// ++ /// When enabled it tells the library to perform all the required proxy ++ /// authentication and connection setup, but no data transfer, and then ++ /// return. ++ /// ++ /// The option can be used to simply test a connection to a server. ++ /// ++ /// By default this value is `false` and corresponds to ++ /// `CURLOPT_CONNECT_ONLY`. ++ pub fn connect_only(&mut self, enable: bool) -> Result<(), Error> { ++ self.setopt_long(curl_sys::CURLOPT_CONNECT_ONLY, enable as c_long) ++ } ++ ++ // /// Set interface to speak DNS over. ++ // /// ++ // /// Set the name of the network interface that the DNS resolver should bind ++ // /// to. This must be an interface name (not an address). ++ // /// ++ // /// By default this option is not set and corresponds to ++ // /// `CURLOPT_DNS_INTERFACE`. ++ // pub fn dns_interface(&mut self, interface: &str) -> Result<(), Error> { ++ // let interface = try!(CString::new(interface)); ++ // self.setopt_str(curl_sys::CURLOPT_DNS_INTERFACE, &interface) ++ // } ++ // ++ // /// IPv4 address to bind DNS resolves to ++ // /// ++ // /// Set the local IPv4 address that the resolver should bind to. The ++ // /// argument should be of type char * and contain a single numerical IPv4 ++ // /// address as a string. ++ // /// ++ // /// By default this option is not set and corresponds to ++ // /// `CURLOPT_DNS_LOCAL_IP4`. ++ // pub fn dns_local_ip4(&mut self, ip: &str) -> Result<(), Error> { ++ // let ip = try!(CString::new(ip)); ++ // self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP4, &ip) ++ // } ++ // ++ // /// IPv6 address to bind DNS resolves to ++ // /// ++ // /// Set the local IPv6 address that the resolver should bind to. The ++ // /// argument should be of type char * and contain a single numerical IPv6 ++ // /// address as a string. ++ // /// ++ // /// By default this option is not set and corresponds to ++ // /// `CURLOPT_DNS_LOCAL_IP6`. ++ // pub fn dns_local_ip6(&mut self, ip: &str) -> Result<(), Error> { ++ // let ip = try!(CString::new(ip)); ++ // self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP6, &ip) ++ // } ++ // ++ // /// Set preferred DNS servers. ++ // /// ++ // /// Provides a list of DNS servers to be used instead of the system default. ++ // /// The format of the dns servers option is: ++ // /// ++ // /// ```text ++ // /// host[:port],[host[:port]]... ++ // /// ``` ++ // /// ++ // /// By default this option is not set and corresponds to ++ // /// `CURLOPT_DNS_SERVERS`. ++ // pub fn dns_servers(&mut self, servers: &str) -> Result<(), Error> { ++ // let servers = try!(CString::new(servers)); ++ // self.setopt_str(curl_sys::CURLOPT_DNS_SERVERS, &servers) ++ // } ++ ++ // ========================================================================= ++ // SSL/Security Options ++ ++ /// Sets the SSL client certificate. ++ /// ++ /// The string should be the file name of your client certificate. 
++ /// The default format is "P12" on Secure Transport and "PEM" on other
++ /// engines, and can be changed with `ssl_cert_type`.
++ ///
++ /// With NSS or Secure Transport, this can also be the nickname of the
++ /// certificate you wish to authenticate with as it is named in the security
++ /// database. If you want to use a file from the current directory, please
++ /// precede it with the "./" prefix, in order to avoid confusion with a
++ /// nickname.
++ ///
++ /// When using a client certificate, you most likely also need to provide a
++ /// private key with `ssl_key`.
++ ///
++ /// By default this option is not set and corresponds to `CURLOPT_SSLCERT`.
++ pub fn ssl_cert<P: AsRef<Path>>(&mut self, cert: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_SSLCERT, cert.as_ref())
++ }
++
++ /// Specify the type of the client SSL certificate.
++ ///
++ /// The string should be the format of your certificate. Supported formats
++ /// are "PEM" and "DER", except with Secure Transport. OpenSSL (versions
++ /// 0.9.3 and later) and Secure Transport (on iOS 5 or later, or OS X 10.7
++ /// or later) also support "P12" for PKCS#12-encoded files.
++ ///
++ /// By default this option is "PEM" and corresponds to
++ /// `CURLOPT_SSLCERTTYPE`.
++ pub fn ssl_cert_type(&mut self, kind: &str) -> Result<(), Error> {
++ let kind = try!(CString::new(kind));
++ self.setopt_str(curl_sys::CURLOPT_SSLCERTTYPE, &kind)
++ }
++
++ /// Specify private keyfile for TLS and SSL client cert.
++ ///
++ /// The string should be the file name of your private key. The default
++ /// format is "PEM" and can be changed with `ssl_key_type`.
++ ///
++ /// (iOS and Mac OS X only) This option is ignored if curl was built against
++ /// Secure Transport. Secure Transport expects the private key to be already
++ /// present in the keychain or PKCS#12 file containing the certificate.
++ ///
++ /// By default this option is not set and corresponds to `CURLOPT_SSLKEY`.
++ pub fn ssl_key<P: AsRef<Path>>(&mut self, key: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_SSLKEY, key.as_ref())
++ }
++
++ /// Set type of the private key file.
++ ///
++ /// The string should be the format of your private key. Supported formats
++ /// are "PEM", "DER" and "ENG".
++ ///
++ /// The format "ENG" enables you to load the private key from a crypto
++ /// engine. In this case `ssl_key` is used as an identifier passed to
++ /// the engine. You have to set the crypto engine with `ssl_engine`.
++ /// "DER" format key file currently does not work because of a bug in
++ /// OpenSSL.
++ ///
++ /// By default this option is "PEM" and corresponds to
++ /// `CURLOPT_SSLKEYTYPE`.
++ pub fn ssl_key_type(&mut self, kind: &str) -> Result<(), Error> {
++ let kind = try!(CString::new(kind));
++ self.setopt_str(curl_sys::CURLOPT_SSLKEYTYPE, &kind)
++ }
++
++ /// Set passphrase to private key.
++ ///
++ /// This will be used as the password required to use the `ssl_key`.
++ /// A pass phrase is never needed to load a certificate, but one is needed
++ /// to load your private key.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_KEYPASSWD`.
++ pub fn key_password(&mut self, password: &str) -> Result<(), Error> {
++ let password = try!(CString::new(password));
++ self.setopt_str(curl_sys::CURLOPT_KEYPASSWD, &password)
++ }
++
++ /// Set the SSL engine identifier.
++ ///
++ /// This will be used as the identifier for the crypto engine you want to
++ /// use for your private key.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_SSLENGINE`.
++ pub fn ssl_engine(&mut self, engine: &str) -> Result<(), Error> {
++ let engine = try!(CString::new(engine));
++ self.setopt_str(curl_sys::CURLOPT_SSLENGINE, &engine)
++ }
++
++ /// Make this handle's SSL engine the default.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_SSLENGINE_DEFAULT`.
++ pub fn ssl_engine_default(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long)
++ }
++
++ // /// Enable TLS false start.
++ // ///
++ // /// This option determines whether libcurl should use false start during the
++ // /// TLS handshake. False start is a mode where a TLS client will start
++ // /// sending application data before verifying the server's Finished message,
++ // /// thus saving a round trip when performing a full handshake.
++ // ///
++ // /// By default this option is not set and corresponds to
++ // /// `CURLOPT_SSL_FALSESTART`.
++ // pub fn ssl_false_start(&mut self, enable: bool) -> Result<(), Error> {
++ // self.setopt_long(curl_sys::CURLOPT_SSL_FALSESTART, enable as c_long)
++ // }
++
++ /// Set preferred HTTP version.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_HTTP_VERSION`.
++ pub fn http_version(&mut self, version: HttpVersion) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_HTTP_VERSION, version as c_long)
++ }
++
++ /// Set preferred TLS/SSL version.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_SSLVERSION`.
++ pub fn ssl_version(&mut self, version: SslVersion) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_SSLVERSION, version as c_long)
++ }
++
++ /// Verify the certificate's name against host.
++ ///
++ /// Disable this with great caution! Doing so removes much of the security
++ /// SSL provides.
++ ///
++ /// By default this option is set to `true` and corresponds to
++ /// `CURLOPT_SSL_VERIFYHOST`.
++ pub fn ssl_verify_host(&mut self, verify: bool) -> Result<(), Error> {
++ let val = if verify { 2 } else { 0 };
++ self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYHOST, val)
++ }
++
++ /// Verify the peer's SSL certificate.
++ ///
++ /// Disable this with great caution! Doing so removes much of the security
++ /// SSL provides.
++ ///
++ /// By default this option is set to `true` and corresponds to
++ /// `CURLOPT_SSL_VERIFYPEER`.
++ pub fn ssl_verify_peer(&mut self, verify: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYPEER, verify as c_long)
++ }
++
++ // /// Verify the certificate's status.
++ // ///
++ // /// This option determines whether libcurl verifies the status of the server
++ // /// cert using the "Certificate Status Request" TLS extension (aka. OCSP
++ // /// stapling).
++ // ///
++ // /// By default this option is set to `false` and corresponds to
++ // /// `CURLOPT_SSL_VERIFYSTATUS`.
++ // pub fn ssl_verify_status(&mut self, verify: bool) -> Result<(), Error> {
++ // self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYSTATUS, verify as c_long)
++ // }
++
++ /// Specify the path to Certificate Authority (CA) bundle
++ ///
++ /// The file referenced should hold one or more certificates to verify the
++ /// peer with.
++ ///
++ /// This option is by default set to the system path where libcurl's cacert
++ /// bundle is assumed to be stored, as established at build time.
++ ///
++ /// If curl is built against the NSS SSL library, the NSS PEM PKCS#11 module
++ /// (libnsspem.so) needs to be available for this option to work properly.
++ ///
++ /// By default this option is the system default, and corresponds to
++ /// `CURLOPT_CAINFO`.
++ pub fn cainfo<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_CAINFO, path.as_ref())
++ }
++
++ /// Set the issuer SSL certificate filename
++ ///
++ /// Specifies a file holding a CA certificate in PEM format. If the option
++ /// is set, an additional check against the peer certificate is performed to
++ /// verify the issuer is indeed the one associated with the certificate
++ /// provided by the option. This additional check is useful in multi-level
++ /// PKI where one needs to enforce that the peer certificate is from a
++ /// specific branch of the tree.
++ ///
++ /// This option makes sense only when used in combination with the
++ /// `ssl_verify_peer` option. Otherwise, the result of the check is not
++ /// considered a failure.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_ISSUERCERT`.
++ pub fn issuer_cert<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_ISSUERCERT, path.as_ref())
++ }
++
++ /// Specify directory holding CA certificates
++ ///
++ /// Names a directory holding multiple CA certificates to verify the peer
++ /// with. If libcurl is built against OpenSSL, the certificate directory
++ /// must be prepared using the openssl c_rehash utility. This makes sense
++ /// only when used in combination with the `ssl_verify_peer` option.
++ ///
++ /// By default this option is not set and corresponds to `CURLOPT_CAPATH`.
++ pub fn capath<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_CAPATH, path.as_ref())
++ }
++
++ /// Specify a Certificate Revocation List file
++ ///
++ /// Names a file with the concatenation of CRLs (in PEM format) to use in
++ /// the certificate validation that occurs during the SSL exchange.
++ ///
++ /// When curl is built to use NSS or GnuTLS, there is no way to influence
++ /// the use of CRL passed to help in the verification process. When libcurl
++ /// is built with OpenSSL support, X509_V_FLAG_CRL_CHECK and
++ /// X509_V_FLAG_CRL_CHECK_ALL are both set, requiring CRL check against all
++ /// the elements of the certificate chain if a CRL file is passed.
++ ///
++ /// This option makes sense only when used in combination with the
++ /// `ssl_verify_peer` option.
++ ///
++ /// A specific error code (`is_ssl_crl_badfile`) is defined with the
++ /// option. It is returned when the SSL exchange fails because the CRL file
++ /// cannot be loaded. A failure in certificate verification due to
++ /// revocation information found in the CRL does not trigger this specific
++ /// error.
++ ///
++ /// By default this option is not set and corresponds to `CURLOPT_CRLFILE`.
++ pub fn crlfile<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_CRLFILE, path.as_ref())
++ }
++
++ /// Request SSL certificate information
++ ///
++ /// Enable libcurl's certificate chain info gatherer. With this enabled,
++ /// libcurl will extract lots of information and data about the certificates
++ /// in the certificate chain used in the SSL connection.
++ ///
++ /// By default this option is `false` and corresponds to
++ /// `CURLOPT_CERTINFO`.
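++ ///
++ /// # Examples
++ ///
++ /// A minimal sketch of asking libcurl to gather certificate-chain
++ /// information for a TLS connection (the URL is a placeholder):
++ ///
++ /// ```
++ /// use curl::easy::Easy;
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.certinfo(true).unwrap();
++ /// ```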
++ pub fn certinfo(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long)
++ }
++
++ // /// Set pinned public key.
++ // ///
++ // /// Pass a pointer to a zero terminated string as parameter. The string can
++ // /// be the file name of your pinned public key. The file format expected is
++ // /// "PEM" or "DER". The string can also be any number of base64 encoded
++ // /// sha256 hashes preceded by "sha256//" and separated by ";"
++ // ///
++ // /// When negotiating a TLS or SSL connection, the server sends a certificate
++ // /// indicating its identity. A public key is extracted from this certificate
++ // /// and if it does not exactly match the public key provided to this option,
++ // /// curl will abort the connection before sending or receiving any data.
++ // ///
++ // /// By default this option is not set and corresponds to
++ // /// `CURLOPT_PINNEDPUBLICKEY`.
++ // pub fn pinned_public_key(&mut self, enable: bool) -> Result<(), Error> {
++ // self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long)
++ // }
++
++ /// Specify a source for random data
++ ///
++ /// The file will be used to read from to seed the random engine for SSL and
++ /// more.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_RANDOM_FILE`.
++ pub fn random_file<P: AsRef<Path>>(&mut self, p: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_RANDOM_FILE, p.as_ref())
++ }
++
++ /// Specify EGD socket path.
++ ///
++ /// Indicates the path name to the Entropy Gathering Daemon socket. It will
++ /// be used to seed the random engine for SSL.
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_EGDSOCKET`.
++ pub fn egd_socket<P: AsRef<Path>>(&mut self, p: P) -> Result<(), Error> {
++ self.setopt_path(curl_sys::CURLOPT_EGDSOCKET, p.as_ref())
++ }
++
++ /// Specify ciphers to use for TLS.
++ ///
++ /// Holds the list of ciphers to use for the SSL connection. The list must
++ /// be syntactically correct; it consists of one or more cipher strings
++ /// separated by colons. Commas or spaces are also acceptable separators
++ /// but colons are normally used. `!`, `-` and `+` can be used as operators.
++ ///
++ /// For OpenSSL and GnuTLS valid examples of cipher lists include 'RC4-SHA',
++ /// 'SHA1+DES', 'TLSv1' and 'DEFAULT'. The default list is normally set when
++ /// you compile OpenSSL.
++ ///
++ /// You'll find more details about cipher lists on this URL:
++ ///
++ /// https://www.openssl.org/docs/apps/ciphers.html
++ ///
++ /// For NSS, valid examples of cipher lists include 'rsa_rc4_128_md5',
++ /// 'rsa_aes_128_sha', etc. With NSS you don't add/remove ciphers. If one
++ /// uses this option then all known ciphers are disabled and only those
++ /// passed in are enabled.
++ ///
++ /// You'll find more details about the NSS cipher lists on this URL:
++ ///
++ /// http://git.fedorahosted.org/cgit/mod_nss.git/plain/docs/mod_nss.html#Directives
++ ///
++ /// By default this option is not set and corresponds to
++ /// `CURLOPT_SSL_CIPHER_LIST`.
++ pub fn ssl_cipher_list(&mut self, ciphers: &str) -> Result<(), Error> {
++ let ciphers = try!(CString::new(ciphers));
++ self.setopt_str(curl_sys::CURLOPT_SSL_CIPHER_LIST, &ciphers)
++ }
++
++ /// Enable or disable use of the SSL session-ID cache
++ ///
++ /// By default all transfers are done with the cache enabled.
++ /// While nothing ever should get hurt by attempting to reuse SSL
++ /// session-IDs, there seem to be or have been broken SSL implementations
++ /// in the wild that may require you to disable this in order for transfers
++ /// to succeed.
++ ///
++ /// This corresponds to the `CURLOPT_SSL_SESSIONID_CACHE` option.
++ pub fn ssl_sessionid_cache(&mut self, enable: bool) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_SSL_SESSIONID_CACHE,
++ enable as c_long)
++ }
++
++ /// Set SSL behavior options
++ ///
++ /// Inform libcurl about SSL specific behaviors.
++ ///
++ /// This corresponds to the `CURLOPT_SSL_OPTIONS` option.
++ pub fn ssl_options(&mut self, bits: &SslOpt) -> Result<(), Error> {
++ self.setopt_long(curl_sys::CURLOPT_SSL_OPTIONS, bits.bits)
++ }
++
++ // /// Set SSL behavior options for proxies
++ // ///
++ // /// Inform libcurl about SSL specific behaviors.
++ // ///
++ // /// This corresponds to the `CURLOPT_PROXY_SSL_OPTIONS` option.
++ // pub fn proxy_ssl_options(&mut self, bits: &SslOpt) -> Result<(), Error> {
++ // self.setopt_long(curl_sys::CURLOPT_PROXY_SSL_OPTIONS, bits.bits)
++ // }
++
++ // /// Stores a private pointer-sized piece of data.
++ // ///
++ // /// This can be retrieved through the `private` function and otherwise
++ // /// libcurl does not tamper with this value. This corresponds to
++ // /// `CURLOPT_PRIVATE` and defaults to 0.
++ // pub fn set_private(&mut self, private: usize) -> Result<(), Error> {
++ // self.setopt_ptr(curl_sys::CURLOPT_PRIVATE, private as *const _)
++ // }
++ //
++ // /// Fetches this handle's private pointer-sized piece of data.
++ // ///
++ // /// This corresponds to `CURLINFO_PRIVATE` and defaults to 0.
++ // pub fn private(&mut self) -> Result<usize, Error> {
++ // self.getopt_ptr(curl_sys::CURLINFO_PRIVATE).map(|p| p as usize)
++ // }
++
++ // =========================================================================
++ // getters
++
++ /// Get info on unmet time conditional
++ ///
++ /// Returns `true` if the condition provided in the previous request was
++ /// not met.
++ ///
++ /// This corresponds to `CURLINFO_CONDITION_UNMET` and may return an error
++ /// if the option is not supported.
++ pub fn time_condition_unmet(&mut self) -> Result<bool, Error> {
++ self.getopt_long(curl_sys::CURLINFO_CONDITION_UNMET).map(|r| r != 0)
++ }
++
++ /// Get the last used URL
++ ///
++ /// In cases when you've asked libcurl to follow redirects, it may
++ /// not be the same value you set with `url`.
++ ///
++ /// This method corresponds to the `CURLINFO_EFFECTIVE_URL` option.
++ ///
++ /// Returns `Ok(None)` if no effective URL is listed or `Err` if an error
++ /// happens or the underlying bytes aren't valid UTF-8.
++ pub fn effective_url(&mut self) -> Result<Option<&str>, Error> {
++ self.getopt_str(curl_sys::CURLINFO_EFFECTIVE_URL)
++ }
++
++ /// Get the last used URL, in bytes
++ ///
++ /// In cases when you've asked libcurl to follow redirects, it may
++ /// not be the same value you set with `url`.
++ ///
++ /// This method corresponds to the `CURLINFO_EFFECTIVE_URL` option.
++ ///
++ /// Returns `Ok(None)` if no effective URL is listed or `Err` if an error
++ /// happens.
++ pub fn effective_url_bytes(&mut self) -> Result<Option<&[u8]>, Error> {
++ self.getopt_bytes(curl_sys::CURLINFO_EFFECTIVE_URL)
++ }
++
++ /// Get the last response code
++ ///
++ /// The stored value will be zero if no server response code has been
++ /// received. Note that a proxy's CONNECT response should be read with
++ /// `http_connectcode` and not this.
++ ///
++ /// Corresponds to `CURLINFO_RESPONSE_CODE` and returns an error if this
++ /// option is not supported.
++ pub fn response_code(&mut self) -> Result<u32, Error> {
++ self.getopt_long(curl_sys::CURLINFO_RESPONSE_CODE).map(|c| c as u32)
++ }
++
++ /// Get the CONNECT response code
++ ///
++ /// Returns the last received HTTP proxy response code to a CONNECT request.
++ /// The returned value will be zero if no such response code was available.
++ ///
++ /// Corresponds to `CURLINFO_HTTP_CONNECTCODE` and returns an error if this
++ /// option is not supported.
++ pub fn http_connectcode(&mut self) -> Result<u32, Error> {
++ self.getopt_long(curl_sys::CURLINFO_HTTP_CONNECTCODE).map(|c| c as u32)
++ }
++
++ /// Get the remote time of the retrieved document
++ ///
++ /// Returns the remote time of the retrieved document (in number of seconds
++ /// since 1 Jan 1970 in the GMT/UTC time zone). If you get `None`, it can be
++ /// because of many reasons (it might be unknown, the server might hide it
++ /// or the server doesn't support the command that tells document time etc)
++ /// and the time of the document is unknown.
++ ///
++ /// Note that you must tell the server to collect this information before
++ /// the transfer is made, by using the `fetch_filetime` method, or you will
++ /// unconditionally get `None` back.
++ ///
++ /// This corresponds to `CURLINFO_FILETIME` and may return an error if the
++ /// option is not supported.
++ pub fn filetime(&mut self) -> Result<Option<i64>, Error> {
++ self.getopt_long(curl_sys::CURLINFO_FILETIME).map(|r| {
++ if r == -1 {
++ None
++ } else {
++ Some(r as i64)
++ }
++ })
++ }
++
++ /// Get the number of downloaded bytes
++ ///
++ /// Returns the total number of bytes that were downloaded.
++ /// The amount is only for the latest transfer and will be reset again for
++ /// each new transfer. This counts actual payload data, what's also commonly
++ /// called body. All meta and header data are excluded and will not be
++ /// counted in this number.
++ ///
++ /// This corresponds to `CURLINFO_SIZE_DOWNLOAD` and may return an error if
++ /// the option is not supported.
++ pub fn download_size(&mut self) -> Result<f64, Error> {
++ self.getopt_double(curl_sys::CURLINFO_SIZE_DOWNLOAD)
++ }
++
++ /// Get the content-length of the download
++ ///
++ /// Returns the content-length of the download.
++ /// This is the value read from the `Content-Length:` field.
++ ///
++ /// This corresponds to `CURLINFO_CONTENT_LENGTH_DOWNLOAD` and may return
++ /// an error if the option is not supported.
++ pub fn content_length_download(&mut self) -> Result<f64, Error> {
++ self.getopt_double(curl_sys::CURLINFO_CONTENT_LENGTH_DOWNLOAD)
++ }
++
++ /// Get total time of previous transfer
++ ///
++ /// Returns the total time for the previous transfer,
++ /// including name resolving, TCP connect etc.
++ ///
++ /// Corresponds to `CURLINFO_TOTAL_TIME` and may return an error if the
++ /// option isn't supported.
++ pub fn total_time(&mut self) -> Result<Duration, Error> {
++ self.getopt_double(curl_sys::CURLINFO_TOTAL_TIME)
++ .map(double_seconds_to_duration)
++ }
++
++ /// Get the name lookup time
++ ///
++ /// Returns the total time from the start
++ /// until the name resolving was completed.
++ ///
++ /// Corresponds to `CURLINFO_NAMELOOKUP_TIME` and may return an error if the
++ /// option isn't supported.
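++ ///
++ /// # Examples
++ ///
++ /// A sketch of inspecting DNS resolution time after a transfer has been
++ /// performed (this runs a real network request; the URL is a placeholder):
++ ///
++ /// ```
++ /// use curl::easy::Easy;
++ ///
++ /// let mut handle = Easy::new();
++ /// handle.url("https://www.rust-lang.org/").unwrap();
++ /// handle.perform().unwrap();
++ /// println!("name lookup took {:?}", handle.namelookup_time().unwrap());
++ /// ```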
++
++    /// Get the name lookup time
++    ///
++    /// Returns the total time from the start until the name resolving was
++    /// completed.
++    ///
++    /// Corresponds to `CURLINFO_NAMELOOKUP_TIME` and may return an error if
++    /// the option isn't supported.
++    pub fn namelookup_time(&mut self) -> Result<Duration, Error> {
++        self.getopt_double(curl_sys::CURLINFO_NAMELOOKUP_TIME)
++            .map(double_seconds_to_duration)
++    }
++
++    /// Get the time until connect
++    ///
++    /// Returns the total time from the start until the connection to the
++    /// remote host (or proxy) was completed.
++    ///
++    /// Corresponds to `CURLINFO_CONNECT_TIME` and may return an error if the
++    /// option isn't supported.
++    pub fn connect_time(&mut self) -> Result<Duration, Error> {
++        self.getopt_double(curl_sys::CURLINFO_CONNECT_TIME)
++            .map(double_seconds_to_duration)
++    }
++
++    /// Get the time until the SSL/SSH handshake is completed
++    ///
++    /// Returns the total time it took from the start until the SSL/SSH
++    /// connect/handshake to the remote host was completed. This time is most
++    /// often very near to the `pretransfer_time` time, except for cases such
++    /// as HTTP pipelining where the pretransfer time can be delayed due to
++    /// waits in line for the pipeline and more.
++    ///
++    /// Corresponds to `CURLINFO_APPCONNECT_TIME` and may return an error if
++    /// the option isn't supported.
++    pub fn appconnect_time(&mut self) -> Result<Duration, Error> {
++        self.getopt_double(curl_sys::CURLINFO_APPCONNECT_TIME)
++            .map(double_seconds_to_duration)
++    }
++
++    /// Get the time until the file transfer start
++    ///
++    /// Returns the total time it took from the start until the file transfer
++    /// is just about to begin. This includes all pre-transfer commands and
++    /// negotiations that are specific to the particular protocol(s) involved.
++    /// It does not involve the sending of the protocol-specific request that
++    /// triggers a transfer.
++    ///
++    /// Corresponds to `CURLINFO_PRETRANSFER_TIME` and may return an error if
++    /// the option isn't supported.
++    pub fn pretransfer_time(&mut self) -> Result<Duration, Error> {
++        self.getopt_double(curl_sys::CURLINFO_PRETRANSFER_TIME)
++            .map(double_seconds_to_duration)
++    }
++
++    /// Get the time until the first byte is received
++    ///
++    /// Returns the total time it took from the start until the first
++    /// byte is received by libcurl. This includes `pretransfer_time` and
++    /// also the time the server needs to calculate the result.
++    ///
++    /// Corresponds to `CURLINFO_STARTTRANSFER_TIME` and may return an error
++    /// if the option isn't supported.
++    pub fn starttransfer_time(&mut self) -> Result<Duration, Error> {
++        self.getopt_double(curl_sys::CURLINFO_STARTTRANSFER_TIME)
++            .map(double_seconds_to_duration)
++    }
++
++    /// Get the time for all redirection steps
++    ///
++    /// Returns the total time it took for all redirection steps, including
++    /// name lookup, connect, pretransfer and transfer, before the final
++    /// transaction was started. `redirect_time` contains the complete
++    /// execution time for multiple redirections.
++    ///
++    /// Corresponds to `CURLINFO_REDIRECT_TIME` and may return an error if the
++    /// option isn't supported.
++    pub fn redirect_time(&mut self) -> Result<Duration, Error> {
++        self.getopt_double(curl_sys::CURLINFO_REDIRECT_TIME)
++            .map(double_seconds_to_duration)
++    }
++
++    /// Get the number of redirects
++    ///
++    /// Corresponds to `CURLINFO_REDIRECT_COUNT` and may return an error if
++    /// the option isn't supported.
++    pub fn redirect_count(&mut self) -> Result<u32, Error> {
++        self.getopt_long(curl_sys::CURLINFO_REDIRECT_COUNT).map(|c| c as u32)
++    }
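++
++    // Illustrative sketch (not part of the upstream sources): the per-phase
++    // timers above are cumulative from the start of the transfer, so a
++    // single phase is the difference of two of them. Hypothetical `handle`:
++    //
++    //     let tcp_connect = handle.connect_time().unwrap()
++    //         - handle.namelookup_time().unwrap();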
++
++    /// Get the URL a redirect would go to
++    ///
++    /// Returns the URL a redirect would take you to if you would enable
++    /// `follow_location`. This can come in very handy if you think using the
++    /// built-in libcurl redirect logic isn't good enough for you but you
++    /// would still prefer to avoid implementing all the magic of figuring
++    /// out the new URL.
++    ///
++    /// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error if the
++    /// url isn't valid utf-8 or an error happens.
++    pub fn redirect_url(&mut self) -> Result<Option<&str>, Error> {
++        self.getopt_str(curl_sys::CURLINFO_REDIRECT_URL)
++    }
++
++    /// Get the URL a redirect would go to, in bytes
++    ///
++    /// Returns the URL a redirect would take you to if you would enable
++    /// `follow_location`. This can come in very handy if you think using the
++    /// built-in libcurl redirect logic isn't good enough for you but you
++    /// would still prefer to avoid implementing all the magic of figuring
++    /// out the new URL.
++    ///
++    /// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error.
++    pub fn redirect_url_bytes(&mut self) -> Result<Option<&[u8]>, Error> {
++        self.getopt_bytes(curl_sys::CURLINFO_REDIRECT_URL)
++    }
++
++    /// Get size of retrieved headers
++    ///
++    /// Corresponds to `CURLINFO_HEADER_SIZE` and may return an error if the
++    /// option isn't supported.
++    pub fn header_size(&mut self) -> Result<u64, Error> {
++        self.getopt_long(curl_sys::CURLINFO_HEADER_SIZE).map(|c| c as u64)
++    }
++
++    /// Get size of sent request.
++    ///
++    /// Corresponds to `CURLINFO_REQUEST_SIZE` and may return an error if the
++    /// option isn't supported.
++    pub fn request_size(&mut self) -> Result<u64, Error> {
++        self.getopt_long(curl_sys::CURLINFO_REQUEST_SIZE).map(|c| c as u64)
++    }
++
++    /// Get Content-Type
++    ///
++    /// Returns the content-type of the downloaded object. This is the value
++    /// read from the `Content-Type:` field. If you get `None`, it means that
++    /// the server didn't send a valid Content-Type header or that the
++    /// protocol used doesn't support this.
++    ///
++    /// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the
++    /// option isn't supported.
++    pub fn content_type(&mut self) -> Result<Option<&str>, Error> {
++        self.getopt_str(curl_sys::CURLINFO_CONTENT_TYPE)
++    }
++
++    /// Get Content-Type, in bytes
++    ///
++    /// Returns the content-type of the downloaded object. This is the value
++    /// read from the `Content-Type:` field. If you get `None`, it means that
++    /// the server didn't send a valid Content-Type header or that the
++    /// protocol used doesn't support this.
++    ///
++    /// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the
++    /// option isn't supported.
++    pub fn content_type_bytes(&mut self) -> Result<Option<&[u8]>, Error> {
++        self.getopt_bytes(curl_sys::CURLINFO_CONTENT_TYPE)
++    }
++
++    /// Get errno number from last connect failure.
++    ///
++    /// Note that the value is only set on failure, it is not reset upon a
++    /// successful operation. The number is OS and system specific.
++    ///
++    /// Corresponds to `CURLINFO_OS_ERRNO` and may return an error if the
++    /// option isn't supported.
++    pub fn os_errno(&mut self) -> Result<i32, Error> {
++        self.getopt_long(curl_sys::CURLINFO_OS_ERRNO).map(|c| c as i32)
++    }
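++
++    // Illustrative sketch (not part of the upstream sources): checking the
++    // Content-Type before deciding how to interpret a downloaded body, with
++    // a hypothetical `handle`:
++    //
++    //     match handle.content_type() {
++    //         Ok(Some(ct)) if ct.starts_with("text/") => { /* decode text */ }
++    //         _ => { /* treat the body as opaque bytes */ }
++    //     }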
++
++    /// Get IP address of last connection.
++    ///
++    /// Returns a string holding the IP address of the most recent connection
++    /// done with this curl handle. This string may be IPv6 when that is
++    /// enabled.
++    ///
++    /// Corresponds to `CURLINFO_PRIMARY_IP` and may return an error if the
++    /// option isn't supported.
++    pub fn primary_ip(&mut self) -> Result<Option<&str>, Error> {
++        self.getopt_str(curl_sys::CURLINFO_PRIMARY_IP)
++    }
++
++    /// Get the latest destination port number
++    ///
++    /// Corresponds to `CURLINFO_PRIMARY_PORT` and may return an error if the
++    /// option isn't supported.
++    pub fn primary_port(&mut self) -> Result<u16, Error> {
++        self.getopt_long(curl_sys::CURLINFO_PRIMARY_PORT).map(|c| c as u16)
++    }
++
++    /// Get local IP address of last connection
++    ///
++    /// Returns a string holding the IP address of the local end of most
++    /// recent connection done with this curl handle. This string may be IPv6
++    /// when that is enabled.
++    ///
++    /// Corresponds to `CURLINFO_LOCAL_IP` and may return an error if the
++    /// option isn't supported.
++    pub fn local_ip(&mut self) -> Result<Option<&str>, Error> {
++        self.getopt_str(curl_sys::CURLINFO_LOCAL_IP)
++    }
++
++    /// Get the latest local port number
++    ///
++    /// Corresponds to `CURLINFO_LOCAL_PORT` and may return an error if the
++    /// option isn't supported.
++    pub fn local_port(&mut self) -> Result<u16, Error> {
++        self.getopt_long(curl_sys::CURLINFO_LOCAL_PORT).map(|c| c as u16)
++    }
++
++    /// Get all known cookies
++    ///
++    /// Returns a linked-list of all cookies cURL knows (expired ones, too).
++    ///
++    /// Corresponds to the `CURLINFO_COOKIELIST` option and may return an
++    /// error if the option isn't supported.
++    pub fn cookies(&mut self) -> Result<List, Error> {
++        unsafe {
++            let mut list = 0 as *mut _;
++            let rc = curl_sys::curl_easy_getinfo(self.inner.handle,
++                                                 curl_sys::CURLINFO_COOKIELIST,
++                                                 &mut list);
++            try!(self.cvt(rc));
++            Ok(list::from_raw(list))
++        }
++    }
++
++    // =========================================================================
++    // Other methods
++
++    /// After options have been set, this will perform the transfer described
++    /// by the options.
++    ///
++    /// This performs the request in a synchronous fashion. This can be used
++    /// multiple times for one easy handle and libcurl will attempt to re-use
++    /// the same connection for all transfers.
++    ///
++    /// This method will preserve all options configured in this handle for
++    /// the next request, and if that is not desired then the options can be
++    /// manually reset or the `reset` method can be called.
++    ///
++    /// Note that this method takes `&self`, which is quite important! This
++    /// allows applications to close over the handle in various callbacks to
++    /// call methods like `unpause_write` and `unpause_read` while a transfer
++    /// is in progress.
++    pub fn perform(&self) -> Result<(), Error> {
++        let ret = unsafe {
++            self.cvt(curl_sys::curl_easy_perform(self.inner.handle))
++        };
++        panic::propagate();
++        return ret
++    }
++
++    /// Unpause reading on a connection.
++    ///
++    /// Using this function, you can explicitly unpause a connection that was
++    /// previously paused.
++    ///
++    /// A connection can be paused by letting the read or the write callbacks
++    /// return `ReadError::Pause` or `WriteError::Pause`.
++    ///
++    /// To unpause, you may for example call this from the progress callback
++    /// which gets called at least once per second, even if the connection is
++    /// paused.
++    ///
++    /// The chance is high that you will get your write callback called before
++    /// this function returns.
++    pub fn unpause_read(&self) -> Result<(), Error> {
++        unsafe {
++            let rc = curl_sys::curl_easy_pause(self.inner.handle,
++                                               curl_sys::CURLPAUSE_RECV_CONT);
++            self.cvt(rc)
++        }
++    }
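++
++    // Illustrative sketch (not part of the upstream sources): because
++    // `perform` takes `&self`, a handle can be captured by its own callbacks
++    // and resumed from there, e.g. from inside a progress callback closure:
++    //
++    //     handle.unpause_read().unwrap();
++    //     handle.unpause_write().unwrap();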
++
++    /// Unpause writing on a connection.
++    ///
++    /// Using this function, you can explicitly unpause a connection that was
++    /// previously paused.
++    ///
++    /// A connection can be paused by letting the read or the write callbacks
++    /// return `ReadError::Pause` or `WriteError::Pause`. A write callback
++    /// that returns pause signals to the library that it couldn't take care
++    /// of any data at all, and that data will then be delivered again to the
++    /// callback when the writing is later unpaused.
++    ///
++    /// To unpause, you may for example call this from the progress callback
++    /// which gets called at least once per second, even if the connection is
++    /// paused.
++    pub fn unpause_write(&self) -> Result<(), Error> {
++        unsafe {
++            let rc = curl_sys::curl_easy_pause(self.inner.handle,
++                                               curl_sys::CURLPAUSE_SEND_CONT);
++            self.cvt(rc)
++        }
++    }
++
++    /// URL encodes a string `s`
++    pub fn url_encode(&mut self, s: &[u8]) -> String {
++        if s.len() == 0 {
++            return String::new()
++        }
++        unsafe {
++            let p = curl_sys::curl_easy_escape(self.inner.handle,
++                                               s.as_ptr() as *const _,
++                                               s.len() as c_int);
++            assert!(!p.is_null());
++            let ret = str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap();
++            let ret = String::from(ret);
++            curl_sys::curl_free(p as *mut _);
++            return ret
++        }
++    }
++
++    /// URL decodes a string `s`, returning the decoded bytes
++    pub fn url_decode(&mut self, s: &str) -> Vec<u8> {
++        if s.len() == 0 {
++            return Vec::new();
++        }
++
++        // Work around https://curl.haxx.se/docs/adv_20130622.html, a bug where
++        // if the last few characters are a bad escape then curl will have a
++        // buffer overrun. If any of the last three characters is a '%' the
++        // trailing escape may be truncated, so pad with a NUL byte.
++        let mut iter = s.chars().rev();
++        let orig_len = s.len();
++        let mut data;
++        let mut s = s;
++        if iter.next() == Some('%') ||
++           iter.next() == Some('%') ||
++           iter.next() == Some('%') {
++            data = s.to_string();
++            data.push(0u8 as char);
++            s = &data[..];
++        }
++        unsafe {
++            let mut len = 0;
++            let p = curl_sys::curl_easy_unescape(self.inner.handle,
++                                                 s.as_ptr() as *const _,
++                                                 orig_len as c_int,
++                                                 &mut len);
++            assert!(!p.is_null());
++            let slice = slice::from_raw_parts(p as *const u8, len as usize);
++            let ret = slice.to_vec();
++            curl_sys::curl_free(p as *mut _);
++            return ret
++        }
++    }
++
++    // TODO: I don't think this is safe, you can drop this which has all the
++    //       callback data and then the next is use-after-free
++    //
++    // /// Attempts to clone this handle, returning a new session handle with
++    // /// the same options set for this handle.
++    // ///
++    // /// Internal state info and things like persistent connections cannot
++    // /// be transferred.
++    // ///
++    // /// # Errors
++    // ///
++    // /// If a new handle could not be allocated or another error happens,
++    // /// `None` is returned.
++    // pub fn try_clone<'b>(&mut self) -> Option<Easy<'b>> {
++    //     unsafe {
++    //         let handle = curl_sys::curl_easy_duphandle(self.handle);
++    //         if handle.is_null() {
++    //             None
++    //         } else {
++    //             Some(Easy {
++    //                 handle: handle,
++    //                 data: blank_data(),
++    //                 _marker: marker::PhantomData,
++    //             })
++    //         }
++    //     }
++    // }
++
++    /// Receives data from a connected socket.
++    ///
++    /// Only useful after a successful `perform` with the `connect_only`
++    /// option set as well.
++    pub fn recv(&mut self, data: &mut [u8]) -> Result<usize, Error> {
++        unsafe {
++            let mut n = 0;
++            let r = curl_sys::curl_easy_recv(self.inner.handle,
++                                             data.as_mut_ptr() as *mut _,
++                                             data.len(),
++                                             &mut n);
++            if r == curl_sys::CURLE_OK {
++                Ok(n)
++            } else {
++                Err(Error::new(r))
++            }
++        }
++    }
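++
++    // Illustrative sketch (not part of the upstream sources): `recv` (and
++    // `send` below) are only usable after a `connect_only` transfer, which
++    // establishes the connection and then stops. Hypothetical `handle`:
++    //
++    //     handle.url("http://example.com/").unwrap();
++    //     handle.connect_only(true).unwrap();
++    //     handle.perform().unwrap();
++    //     handle.send(b"GET / HTTP/1.0\r\n\r\n").unwrap();
++    //     let mut buf = [0; 1024];
++    //     let n = handle.recv(&mut buf).unwrap();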
++
++    /// Sends data over the connected socket.
++    ///
++    /// Only useful after a successful `perform` with the `connect_only`
++    /// option set as well.
++    pub fn send(&mut self, data: &[u8]) -> Result<usize, Error> {
++        unsafe {
++            let mut n = 0;
++            let rc = curl_sys::curl_easy_send(self.inner.handle,
++                                              data.as_ptr() as *const _,
++                                              data.len(),
++                                              &mut n);
++            try!(self.cvt(rc));
++            Ok(n)
++        }
++    }
++
++    /// Get a pointer to the raw underlying CURL handle.
++    pub fn raw(&self) -> *mut curl_sys::CURL {
++        self.inner.handle
++    }
++
++    #[cfg(unix)]
++    fn setopt_path(&mut self,
++                   opt: curl_sys::CURLoption,
++                   val: &Path) -> Result<(), Error> {
++        use std::os::unix::prelude::*;
++        let s = try!(CString::new(val.as_os_str().as_bytes()));
++        self.setopt_str(opt, &s)
++    }
++
++    #[cfg(windows)]
++    fn setopt_path(&mut self,
++                   opt: curl_sys::CURLoption,
++                   val: &Path) -> Result<(), Error> {
++        match val.to_str() {
++            Some(s) => self.setopt_str(opt, &try!(CString::new(s))),
++            None => Err(Error::new(curl_sys::CURLE_CONV_FAILED)),
++        }
++    }
++
++    fn setopt_long(&mut self,
++                   opt: curl_sys::CURLoption,
++                   val: c_long) -> Result<(), Error> {
++        unsafe {
++            self.cvt(curl_sys::curl_easy_setopt(self.inner.handle, opt, val))
++        }
++    }
++
++    fn setopt_str(&mut self,
++                  opt: curl_sys::CURLoption,
++                  val: &CStr) -> Result<(), Error> {
++        self.setopt_ptr(opt, val.as_ptr())
++    }
++
++    fn setopt_ptr(&self,
++                  opt: curl_sys::CURLoption,
++                  val: *const c_char) -> Result<(), Error> {
++        unsafe {
++            self.cvt(curl_sys::curl_easy_setopt(self.inner.handle, opt, val))
++        }
++    }
++
++    fn setopt_off_t(&mut self,
++                    opt: curl_sys::CURLoption,
++                    val: curl_sys::curl_off_t) -> Result<(), Error> {
++        unsafe {
++            let rc = curl_sys::curl_easy_setopt(self.inner.handle, opt, val);
++            self.cvt(rc)
++        }
++    }
++
++    fn getopt_bytes(&mut self, opt: curl_sys::CURLINFO)
++                    -> Result<Option<&[u8]>, Error> {
++        unsafe {
++            let p = try!(self.getopt_ptr(opt));
++            if p.is_null() {
++                Ok(None)
++            } else {
++                Ok(Some(CStr::from_ptr(p).to_bytes()))
++            }
++        }
++    }
++
++    fn getopt_ptr(&mut self, opt: curl_sys::CURLINFO)
++                  -> Result<*const c_char, Error> {
++        unsafe {
++            let mut p = 0 as *const c_char;
++            let rc = curl_sys::curl_easy_getinfo(self.inner.handle, opt, &mut p);
++            try!(self.cvt(rc));
++            Ok(p)
++        }
++    }
++
++    fn getopt_str(&mut self, opt: curl_sys::CURLINFO)
++                  -> Result<Option<&str>, Error> {
++        match self.getopt_bytes(opt) {
++            Ok(None) => Ok(None),
++            Err(e) => Err(e),
++            Ok(Some(bytes)) => {
++                match str::from_utf8(bytes) {
++                    Ok(s) => Ok(Some(s)),
++                    Err(_) => Err(Error::new(curl_sys::CURLE_CONV_FAILED)),
++                }
++            }
++        }
++    }
++
++    fn getopt_long(&mut self, opt: curl_sys::CURLINFO) -> Result<c_long, Error> {
++        unsafe {
++            let mut p = 0;
++            let rc = curl_sys::curl_easy_getinfo(self.inner.handle, opt, &mut p);
++            try!(self.cvt(rc));
++            Ok(p)
++        }
++    }
++
++    fn getopt_double(&mut self, opt: curl_sys::CURLINFO) -> Result<c_double, Error> {
++        unsafe {
++            let mut p = 0 as c_double;
++            let rc = curl_sys::curl_easy_getinfo(self.inner.handle, opt, &mut p);
++            try!(self.cvt(rc));
++            Ok(p)
++        }
++    }
++
++    fn cvt(&self, rc: curl_sys::CURLcode) -> Result<(), Error> {
++        if rc == curl_sys::CURLE_OK {
++            return Ok(())
++        }
++        // Fetch the verbose message libcurl wrote into the error buffer, if
++        // any, and clear the buffer for the next operation.
++        let mut buf = self.inner.error_buf.borrow_mut();
++        if buf[0] == 0 {
++            return Err(Error::new(rc))
++        }
++        let pos = buf.iter().position(|i| *i == 0).unwrap_or(buf.len());
++        let msg = String::from_utf8_lossy(&buf[..pos]).into_owned();
++        buf[0] = 0;
++        Err(::error::error_with_extra(rc, msg.into_boxed_str()))
++    }
++}
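++
++// Illustrative sketch (not part of the upstream sources): a custom `Handler`
++// that buffers the response body, driven through `Easy2`. The `Collector`
++// name is hypothetical:
++//
++//     struct Collector(Vec<u8>);
++//
++//     impl Handler for Collector {
++//         fn write(&mut self, data: &[u8]) -> Result<usize, WriteError> {
++//             self.0.extend_from_slice(data);
++//             Ok(data.len())
++//         }
++//     }
++//
++//     let mut easy = Easy2::new(Collector(Vec::new()));
++//     easy.url("https://www.rust-lang.org/").unwrap();
++//     easy.perform().unwrap();
++//     println!("{} bytes", easy.get_ref().0.len());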
++
++impl<H: fmt::Debug> fmt::Debug for Easy2<H> {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_struct("Easy")
++         .field("handle", &self.inner.handle)
++         .field("handler", &self.inner.handler)
++         .finish()
++    }
++}
++
++impl<H> Drop for Easy2<H> {
++    fn drop(&mut self) {
++        unsafe {
++            curl_sys::curl_easy_cleanup(self.inner.handle);
++        }
++    }
++}
++
++extern fn header_cb<H: Handler>(buffer: *mut c_char,
++                                size: size_t,
++                                nitems: size_t,
++                                userptr: *mut c_void) -> size_t {
++    let keep_going = panic::catch(|| unsafe {
++        let data = slice::from_raw_parts(buffer as *const u8,
++                                         size * nitems);
++        (*(userptr as *mut Inner<H>)).handler.header(data)
++    }).unwrap_or(false);
++    if keep_going {
++        size * nitems
++    } else {
++        !0
++    }
++}
++
++extern fn write_cb<H: Handler>(ptr: *mut c_char,
++                               size: size_t,
++                               nmemb: size_t,
++                               data: *mut c_void) -> size_t {
++    panic::catch(|| unsafe {
++        let input = slice::from_raw_parts(ptr as *const u8,
++                                          size * nmemb);
++        match (*(data as *mut Inner<H>)).handler.write(input) {
++            Ok(s) => s,
++            Err(WriteError::Pause) |
++            Err(WriteError::__Nonexhaustive) => curl_sys::CURL_WRITEFUNC_PAUSE,
++        }
++    }).unwrap_or(!0)
++}
++
++extern fn read_cb<H: Handler>(ptr: *mut c_char,
++                              size: size_t,
++                              nmemb: size_t,
++                              data: *mut c_void) -> size_t {
++    panic::catch(|| unsafe {
++        let input = slice::from_raw_parts_mut(ptr as *mut u8,
++                                              size * nmemb);
++        match (*(data as *mut Inner<H>)).handler.read(input) {
++            Ok(s) => s,
++            Err(ReadError::Pause) => {
++                curl_sys::CURL_READFUNC_PAUSE
++            }
++            Err(ReadError::__Nonexhaustive) |
++            Err(ReadError::Abort) => {
++                curl_sys::CURL_READFUNC_ABORT
++            }
++        }
++    }).unwrap_or(!0)
++}
++
++extern fn seek_cb<H: Handler>(data: *mut c_void,
++                              offset: curl_sys::curl_off_t,
++                              origin: c_int) -> c_int {
++    panic::catch(|| unsafe {
++        let from = if origin == libc::SEEK_SET {
++            SeekFrom::Start(offset as u64)
++        } else {
++            panic!("unknown origin from libcurl: {}", origin);
++        };
++        (*(data as *mut Inner<H>)).handler.seek(from) as c_int
++    }).unwrap_or(!0)
++}
++
++extern fn progress_cb<H: Handler>(data: *mut c_void,
++                                  dltotal: c_double,
++                                  dlnow: c_double,
++                                  ultotal: c_double,
++                                  ulnow: c_double) -> c_int {
++    let keep_going = panic::catch(|| unsafe {
++        (*(data as *mut Inner<H>)).handler.progress(dltotal, dlnow, ultotal, ulnow)
++    }).unwrap_or(false);
++    if keep_going {
++        0
++    } else {
++        1
++    }
++}
++
++// TODO: expose `handle`? is that safe?
++extern fn debug_cb<H: Handler>(_handle: *mut curl_sys::CURL,
++                               kind: curl_sys::curl_infotype,
++                               data: *mut c_char,
++                               size: size_t,
++                               userptr: *mut c_void) -> c_int {
++    panic::catch(|| unsafe {
++        let data = slice::from_raw_parts(data as *const u8, size);
++        let kind = match kind {
++            curl_sys::CURLINFO_TEXT => InfoType::Text,
++            curl_sys::CURLINFO_HEADER_IN => InfoType::HeaderIn,
++            curl_sys::CURLINFO_HEADER_OUT => InfoType::HeaderOut,
++            curl_sys::CURLINFO_DATA_IN => InfoType::DataIn,
++            curl_sys::CURLINFO_DATA_OUT => InfoType::DataOut,
++            curl_sys::CURLINFO_SSL_DATA_IN => InfoType::SslDataIn,
++            curl_sys::CURLINFO_SSL_DATA_OUT => InfoType::SslDataOut,
++            _ => return,
++        };
++        (*(userptr as *mut Inner<H>)).handler.debug(kind, data)
++    });
++    return 0
++}
++
++extern fn ssl_ctx_cb<H: Handler>(_handle: *mut curl_sys::CURL,
++                                 ssl_ctx: *mut c_void,
++                                 data: *mut c_void) -> curl_sys::CURLcode {
++    let res = panic::catch(|| unsafe {
++        match (*(data as *mut Inner<H>)).handler.ssl_ctx(ssl_ctx) {
++            Ok(()) => curl_sys::CURLE_OK,
++            Err(e) => e.code(),
++        }
++    });
++    // Default to a generic SSL error in case of panic. This
++    // shouldn't really matter since the error should be
++    // propagated later on but better safe than sorry...
++    res.unwrap_or(curl_sys::CURLE_SSL_CONNECT_ERROR)
++}
++
++// TODO: expose `purpose` and `sockaddr` inside of `address`
++extern fn opensocket_cb<H: Handler>(data: *mut c_void,
++                                    _purpose: curl_sys::curlsocktype,
++                                    address: *mut curl_sys::curl_sockaddr)
++    -> curl_sys::curl_socket_t
++{
++    let res = panic::catch(|| unsafe {
++        (*(data as *mut Inner<H>)).handler.open_socket((*address).family,
++                                                       (*address).socktype,
++                                                       (*address).protocol)
++            .unwrap_or(curl_sys::CURL_SOCKET_BAD)
++    });
++    res.unwrap_or(curl_sys::CURL_SOCKET_BAD)
++}
++
++fn double_seconds_to_duration(seconds: f64) -> Duration {
++    let whole_seconds = seconds.trunc() as u64;
++    let nanos = seconds.fract() * 1_000_000_000f64;
++    Duration::new(whole_seconds, nanos as u32)
++}
++
++#[test]
++fn double_seconds_to_duration_whole_second() {
++    let dur = double_seconds_to_duration(1.0);
++    assert_eq!(dur.as_secs(), 1);
++    assert_eq!(dur.subsec_nanos(), 0);
++}
++
++#[test]
++fn double_seconds_to_duration_sub_second1() {
++    let dur = double_seconds_to_duration(0.0);
++    assert_eq!(dur.as_secs(), 0);
++    assert_eq!(dur.subsec_nanos(), 0);
++}
++
++#[test]
++fn double_seconds_to_duration_sub_second2() {
++    let dur = double_seconds_to_duration(0.5);
++    assert_eq!(dur.as_secs(), 0);
++    assert_eq!(dur.subsec_nanos(), 500_000_000);
++}
++
++impl Auth {
++    /// Creates a new set of authentications with no members.
++    ///
++    /// An `Auth` structure is used to configure which forms of authentication
++    /// are attempted when negotiating connections with servers.
++    pub fn new() -> Auth {
++        Auth { bits: 0 }
++    }
++
++    /// HTTP Basic authentication.
++    ///
++    /// This is the default choice, and the only method that is in widespread
++    /// use and supported virtually everywhere. This sends the user name and
++    /// password over the network in plain text, easily captured by others.
++    pub fn basic(&mut self, on: bool) -> &mut Auth {
++        self.flag(curl_sys::CURLAUTH_BASIC, on)
++    }
++
++    /// HTTP Digest authentication.
++    ///
++    /// Digest authentication is defined in RFC 2617 and is a more secure way
++    /// to do authentication over public networks than the regular
++    /// old-fashioned Basic method.
++    pub fn digest(&mut self, on: bool) -> &mut Auth {
++        self.flag(curl_sys::CURLAUTH_DIGEST, on)
++    }
++
++    /// HTTP Digest authentication with an IE flavor.
++    ///
++    /// Digest authentication is defined in RFC 2617 and is a more secure way
++    /// to do authentication over public networks than the regular
++    /// old-fashioned Basic method. The IE flavor is simply that libcurl will
++    /// use a special "quirk" that IE is known to have used before version 7
++    /// and that some servers require the client to use.
++    pub fn digest_ie(&mut self, on: bool) -> &mut Auth {
++        self.flag(curl_sys::CURLAUTH_DIGEST_IE, on)
++    }
++
++    /// HTTP Negotiate (SPNEGO) authentication.
++    ///
++    /// Negotiate authentication is defined in RFC 4559 and is the most secure
++    /// way to perform authentication over HTTP.
++    ///
++    /// You need to build libcurl with a suitable GSS-API library or SSPI on
++    /// Windows for this to work.
++    pub fn gssnegotiate(&mut self, on: bool) -> &mut Auth {
++        self.flag(curl_sys::CURLAUTH_GSSNEGOTIATE, on)
++    }
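++
++    // Illustrative sketch (not part of the upstream sources): mechanisms are
++    // toggled on the builder and handed to `http_auth` on a hypothetical
++    // `handle`, letting libcurl pick the most secure one the server supports:
++    //
++    //     let mut auth = Auth::new();
++    //     auth.basic(true).digest(true);
++    //     handle.http_auth(&auth).unwrap();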
++
++    /// HTTP NTLM authentication.
++    ///
++    /// A proprietary protocol invented and used by Microsoft. It uses a
++    /// challenge-response and hash concept similar to Digest, to prevent the
++    /// password from being eavesdropped.
++    ///
++    /// You need to build libcurl with either OpenSSL, GnuTLS or NSS support
++    /// for this option to work, or build libcurl on Windows with SSPI support.
++    pub fn ntlm(&mut self, on: bool) -> &mut Auth {
++        self.flag(curl_sys::CURLAUTH_NTLM, on)
++    }
++
++    /// NTLM delegating to winbind helper.
++    ///
++    /// Authentication is performed by a separate binary application that is
++    /// executed when needed. The name of the application is specified at
++    /// compile time but is typically /usr/bin/ntlm_auth
++    ///
++    /// Note that libcurl will fork when necessary to run the winbind
++    /// application and kill it when complete, calling waitpid() to await its
++    /// exit when done. On POSIX operating systems, killing the process will
++    /// cause a SIGCHLD signal to be raised (regardless of whether
++    /// CURLOPT_NOSIGNAL is set), which must be handled intelligently by the
++    /// application. In particular, the application must not unconditionally
++    /// call wait() in its SIGCHLD signal handler to avoid being subject to a
++    /// race condition. This behavior is subject to change in future versions
++    /// of libcurl.
++    ///
++    /// A proprietary protocol invented and used by Microsoft. It uses a
++    /// challenge-response and hash concept similar to Digest, to prevent the
++    /// password from being eavesdropped.
++    pub fn ntlm_wb(&mut self, on: bool) -> &mut Auth {
++        self.flag(curl_sys::CURLAUTH_NTLM_WB, on)
++    }
++
++    fn flag(&mut self, bit: c_ulong, on: bool) -> &mut Auth {
++        if on {
++            self.bits |= bit as c_long;
++        } else {
++            self.bits &= !bit as c_long;
++        }
++        self
++    }
++}
++
++impl fmt::Debug for Auth {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        let bits = self.bits as c_ulong;
++        f.debug_struct("Auth")
++         .field("basic", &(bits & curl_sys::CURLAUTH_BASIC != 0))
++         .field("digest", &(bits & curl_sys::CURLAUTH_DIGEST != 0))
++         .field("digest_ie", &(bits & curl_sys::CURLAUTH_DIGEST_IE != 0))
++         .field("gssnegotiate", &(bits & curl_sys::CURLAUTH_GSSNEGOTIATE != 0))
++         .field("ntlm", &(bits & curl_sys::CURLAUTH_NTLM != 0))
++         .field("ntlm_wb", &(bits & curl_sys::CURLAUTH_NTLM_WB != 0))
++         .finish()
++    }
++}
++
++impl SslOpt {
++    /// Creates a new set of SSL options.
++    pub fn new() -> SslOpt {
++        SslOpt { bits: 0 }
++    }
++
++    /// Tells libcurl to disable certificate revocation checks for those SSL
++    /// backends where such behavior is present.
++    ///
++    /// Currently this option is only supported for WinSSL (the native Windows
++    /// SSL library), with an exception in the case of Windows' Untrusted
++    /// Publishers blacklist which it seems can't be bypassed. This option may
++    /// have broader support to accommodate other SSL backends in the future.
++    /// https://curl.haxx.se/docs/ssl-compared.html
++    pub fn no_revoke(&mut self, on: bool) -> &mut SslOpt {
++        self.flag(curl_sys::CURLSSLOPT_NO_REVOKE, on)
++    }
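++
++    // Illustrative sketch (not part of the upstream sources): options
++    // accumulate on the builder and are applied with `ssl_options` on a
++    // hypothetical `handle`:
++    //
++    //     let mut opts = SslOpt::new();
++    //     opts.no_revoke(true);
++    //     handle.ssl_options(&opts).unwrap();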
++
++    /// Tells libcurl to not attempt to use any workarounds for a security
++    /// flaw in the SSL3 and TLS1.0 protocols.
++    ///
++    /// If this option isn't used or this bit is set to 0, the SSL layer
++    /// libcurl uses may use a work-around for this flaw although it might
++    /// cause interoperability problems with some (older) SSL implementations.
++    ///
++    /// > WARNING: avoiding this work-around lessens the security, and by
++    /// > setting this option to 1 you ask for exactly that. This option is
++    /// > only supported for DarwinSSL, NSS and OpenSSL.
++    pub fn allow_beast(&mut self, on: bool) -> &mut SslOpt {
++        self.flag(curl_sys::CURLSSLOPT_ALLOW_BEAST, on)
++    }
++
++    fn flag(&mut self, bit: c_long, on: bool) -> &mut SslOpt {
++        if on {
++            self.bits |= bit as c_long;
++        } else {
++            self.bits &= !bit as c_long;
++        }
++        self
++    }
++}
++
++impl fmt::Debug for SslOpt {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_struct("SslOpt")
++         .field("no_revoke", &(self.bits & curl_sys::CURLSSLOPT_NO_REVOKE != 0))
++         .field("allow_beast", &(self.bits & curl_sys::CURLSSLOPT_ALLOW_BEAST != 0))
++         .finish()
++    }
++}
diff --cc vendor/curl-0.4.14/src/easy/list.rs
index 000000000,000000000..732d00639
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/easy/list.rs
@@@ -1,0 -1,0 +1,99 @@@
++use std::ffi::{CStr, CString};
++use std::fmt;
++
++use curl_sys;
++use Error;
++
++/// A linked list of strings
++pub struct List {
++    raw: *mut curl_sys::curl_slist,
++}
++
++/// An iterator over `List`
++#[derive(Clone)]
++pub struct Iter<'a> {
++    _me: &'a List,
++    cur: *mut curl_sys::curl_slist,
++}
++
++pub fn raw(list: &List) -> *mut curl_sys::curl_slist {
++    list.raw
++}
++
++pub unsafe fn from_raw(raw: *mut curl_sys::curl_slist) -> List {
++    List { raw: raw }
++}
++
++unsafe impl Send for List {}
++
++impl List {
++    /// Creates a new empty list of strings.
++    pub fn new() -> List {
++        List { raw: 0 as *mut _ }
++    }
++
++    /// Appends some data into this list.
++    pub fn append(&mut self, data: &str) -> Result<(), Error> {
++        let data = try!(CString::new(data));
++        unsafe {
++            let raw = curl_sys::curl_slist_append(self.raw, data.as_ptr());
++            assert!(!raw.is_null());
++            self.raw = raw;
++            Ok(())
++        }
++    }
++
++    /// Returns an iterator over the nodes in this list.
++    pub fn iter(&self) -> Iter {
++        Iter { _me: self, cur: self.raw }
++    }
++}
++
++impl fmt::Debug for List {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_list()
++         .entries(self.iter().map(String::from_utf8_lossy))
++         .finish()
++    }
++}
++
++impl<'a> IntoIterator for &'a List {
++    type IntoIter = Iter<'a>;
++    type Item = &'a [u8];
++
++    fn into_iter(self) -> Iter<'a> {
++        self.iter()
++    }
++}
++
++impl Drop for List {
++    fn drop(&mut self) {
++        unsafe {
++            curl_sys::curl_slist_free_all(self.raw)
++        }
++    }
++}
++
++impl<'a> Iterator for Iter<'a> {
++    type Item = &'a [u8];
++
++    fn next(&mut self) -> Option<&'a [u8]> {
++        if self.cur.is_null() {
++            return None
++        }
++
++        unsafe {
++            let ret = Some(CStr::from_ptr((*self.cur).data).to_bytes());
++            self.cur = (*self.cur).next;
++            return ret
++        }
++    }
++}
++
++impl<'a> fmt::Debug for Iter<'a> {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_list()
++         .entries(self.clone().map(String::from_utf8_lossy))
++         .finish()
++    }
++}
diff --cc vendor/curl-0.4.14/src/easy/mod.rs
index 000000000,000000000..da8db0037
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/easy/mod.rs
@@@ -1,0 -1,0 +1,22 @@@
++//! Bindings to the "easy" libcurl API.
++//!
++//! This module contains some simple types like `Easy` and `List` which are just
++//! wrappers around the corresponding libcurl types. There's also a few enums
++//! scattered about for various options here and there.
++//!
++//! Most simple usage of libcurl will likely use the `Easy` structure here, and
++//! you can find more docs about its usage on that struct.
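++//!
++//! As a brief sketch (added for illustration, not part of the upstream docs;
++//! requires network access at run time), a response body can be collected
++//! with the scoped `Transfer` API:
++//!
++//! ```rust,no_run
++//! use curl::easy::Easy;
++//!
++//! let mut body = Vec::new();
++//! let mut easy = Easy::new();
++//! easy.url("https://www.rust-lang.org/").unwrap();
++//! {
++//!     let mut transfer = easy.transfer();
++//!     transfer.write_function(|data| {
++//!         body.extend_from_slice(data);
++//!         Ok(data.len())
++//!     }).unwrap();
++//!     transfer.perform().unwrap();
++//! }
++//! println!("collected {} bytes", body.len());
++//! ```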
++
++mod list;
++mod form;
++mod handle;
++mod handler;
++mod windows;
++
++pub use self::list::{List, Iter};
++pub use self::form::{Form, Part};
++pub use self::handle::{Easy, Transfer};
++pub use self::handler::{Easy2, Handler};
++pub use self::handler::{InfoType, SeekResult, ReadError, WriteError};
++pub use self::handler::{TimeCondition, IpResolve, HttpVersion, SslVersion};
++pub use self::handler::{SslOpt, NetRc, Auth, ProxyType};
diff --cc vendor/curl-0.4.14/src/easy/windows.rs
index 000000000,000000000..018e3dba2
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/easy/windows.rs
@@@ -1,0 -1,0 +1,136 @@@
++#![allow(non_camel_case_types, non_snake_case)]
++
++use libc::c_void;
++
++#[cfg(target_env = "msvc")]
++mod win {
++    use kernel32;
++    use std::ffi::CString;
++    use std::mem;
++    use std::ptr;
++    use schannel::cert_context::ValidUses;
++    use schannel::cert_store::CertStore;
++    use winapi::{self, c_void, c_uchar, c_long, c_int};
++
++    fn lookup(module: &str, symbol: &str) -> Option<*const c_void> {
++        unsafe {
++            let symbol = CString::new(symbol).unwrap();
++            let mut mod_buf: Vec<u16> = module.encode_utf16().collect();
++            mod_buf.push(0);
++            let handle = kernel32::GetModuleHandleW(mod_buf.as_mut_ptr());
++            let n = kernel32::GetProcAddress(handle, symbol.as_ptr());
++            if n == ptr::null() {
++                None
++            } else {
++                Some(n)
++            }
++        }
++    }
++
++    pub enum X509_STORE {}
++    pub enum X509 {}
++    pub enum SSL_CTX {}
++
++    type d2i_X509_fn = unsafe extern "C" fn(
++        a: *mut *mut X509,
++        pp: *mut *const c_uchar,
++        length: c_long,
++    ) -> *mut X509;
++    type X509_free_fn = unsafe extern "C" fn(x: *mut X509);
++    type X509_STORE_add_cert_fn = unsafe extern "C" fn(store: *mut X509_STORE, x: *mut X509)
++        -> c_int;
++    type SSL_CTX_get_cert_store_fn = unsafe extern "C" fn(ctx: *const SSL_CTX)
++        -> *mut X509_STORE;
++
++    struct OpenSSL {
++        d2i_X509: d2i_X509_fn,
++        X509_free: X509_free_fn,
++        X509_STORE_add_cert: X509_STORE_add_cert_fn,
++        SSL_CTX_get_cert_store: SSL_CTX_get_cert_store_fn,
++    }
++
++    unsafe fn lookup_functions(crypto_module: &str, ssl_module: &str)
++        -> Option<OpenSSL>
++    {
++        macro_rules! get {
++            ($(let $sym:ident in $module:expr;)*) => ($(
++                let $sym = match lookup($module, stringify!($sym)) {
++                    Some(p) => p,
++                    None => return None,
++                };
++            )*)
++        }
++        get! {
++            let d2i_X509 in crypto_module;
++            let X509_free in crypto_module;
++            let X509_STORE_add_cert in crypto_module;
++            let SSL_CTX_get_cert_store in ssl_module;
++        }
++        Some(OpenSSL {
++            d2i_X509: mem::transmute(d2i_X509),
++            X509_free: mem::transmute(X509_free),
++            X509_STORE_add_cert: mem::transmute(X509_STORE_add_cert),
++            SSL_CTX_get_cert_store: mem::transmute(SSL_CTX_get_cert_store),
++        })
++    }
++
++    pub unsafe fn add_certs_to_context(ssl_ctx: *mut c_void) {
++        // check the runtime version of OpenSSL
++        let openssl = match ::version::Version::get().ssl_version() {
++            Some(ssl_ver) if ssl_ver.starts_with("OpenSSL/1.1.0") => {
++                lookup_functions("libcrypto", "libssl")
++            }
++            Some(ssl_ver) if ssl_ver.starts_with("OpenSSL/1.0.2") => {
++                lookup_functions("libeay32", "ssleay32")
++            }
++            _ => return,
++        };
++        let openssl = match openssl {
++            Some(s) => s,
++            None => return,
++        };
++
++        let openssl_store = (openssl.SSL_CTX_get_cert_store)(ssl_ctx as *const SSL_CTX);
++        let mut store = match CertStore::open_current_user("ROOT") {
++            Ok(s) => s,
++            Err(_) => return,
++        };
++
++        for cert in store.certs() {
++            let valid_uses = match cert.valid_uses() {
++                Ok(v) => v,
++                Err(_) => continue,
++            };
++
++            // check the extended key usage for the "Server Authentication" OID
++            match valid_uses {
++                ValidUses::All => {}
++                ValidUses::Oids(ref oids) => {
++                    let oid = winapi::wincrypt::szOID_PKIX_KP_SERVER_AUTH.to_owned();
++                    if !oids.contains(&oid) {
++                        continue
++                    }
++                }
++            }
++
++            let der = cert.to_der();
++            let x509 = (openssl.d2i_X509)(ptr::null_mut(),
++                                          &mut der.as_ptr(),
++                                          der.len() as c_long);
++            if !x509.is_null() {
++                (openssl.X509_STORE_add_cert)(openssl_store, x509);
++                (openssl.X509_free)(x509);
++            }
++        }
++    }
++}
++
++#[cfg(target_env = "msvc")]
++pub fn add_certs_to_context(ssl_ctx: *mut c_void) {
++    unsafe {
++        win::add_certs_to_context(ssl_ctx as *mut _);
++    }
++}
++
++#[cfg(not(target_env = "msvc"))]
++pub fn add_certs_to_context(_: *mut c_void) {}
diff --cc vendor/curl-0.4.14/src/error.rs
index 000000000,000000000..8dacc2b49
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/error.rs
@@@ -1,0 -1,0 +1,598 @@@
++use std::error;
++use std::ffi::{self, CStr};
++use std::fmt;
++use std::str;
++use std::io;
++
++use curl_sys;
++
++/// An error returned from various "easy" operations.
++///
++/// This structure wraps a `CURLcode`.
++#[derive(Clone, PartialEq)]
++pub struct Error {
++    code: curl_sys::CURLcode,
++    extra: Option<Box<str>>,
++}
++
++pub fn error_with_extra(code: curl_sys::CURLcode, extra: Box<str>) -> Error {
++    Error {
++        code: code,
++        extra: Some(extra),
++    }
++}
++
++impl Error {
++    /// Creates a new error from the underlying code returned by libcurl.
++    pub fn new(code: curl_sys::CURLcode) -> Error {
++        Error {
++            code: code,
++            extra: None,
++        }
++    }
++
++    /// Returns whether this error corresponds to CURLE_UNSUPPORTED_PROTOCOL.
++    pub fn is_unsupported_protocol(&self) -> bool {
++        self.code == curl_sys::CURLE_UNSUPPORTED_PROTOCOL
++    }
++
++    /// Returns whether this error corresponds to CURLE_FAILED_INIT.
++    pub fn is_failed_init(&self) -> bool {
++        self.code == curl_sys::CURLE_FAILED_INIT
++    }
++
++    /// Returns whether this error corresponds to CURLE_URL_MALFORMAT.
++    pub fn is_url_malformed(&self) -> bool {
++        self.code == curl_sys::CURLE_URL_MALFORMAT
++    }
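++
++    // Illustrative sketch (not part of the upstream sources): the predicates
++    // below allow matching on failure classes without comparing raw codes.
++    // Assumes a hypothetical configured `easy` handle:
++    //
++    //     match easy.perform() {
++    //         Ok(()) => {}
++    //         Err(ref e) if e.is_couldnt_resolve_host() => { /* maybe retry */ }
++    //         Err(e) => panic!("transfer failed: {}", e),
++    //     }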
++
++    // /// Returns whether this error corresponds to CURLE_NOT_BUILT_IN.
++    // pub fn is_not_built_in(&self) -> bool {
++    //     self.code == curl_sys::CURLE_NOT_BUILT_IN
++    // }
++
++    /// Returns whether this error corresponds to CURLE_COULDNT_RESOLVE_PROXY.
++    pub fn is_couldnt_resolve_proxy(&self) -> bool {
++        self.code == curl_sys::CURLE_COULDNT_RESOLVE_PROXY
++    }
++
++    /// Returns whether this error corresponds to CURLE_COULDNT_RESOLVE_HOST.
++    pub fn is_couldnt_resolve_host(&self) -> bool {
++        self.code == curl_sys::CURLE_COULDNT_RESOLVE_HOST
++    }
++
++    /// Returns whether this error corresponds to CURLE_COULDNT_CONNECT.
++    pub fn is_couldnt_connect(&self) -> bool {
++        self.code == curl_sys::CURLE_COULDNT_CONNECT
++    }
++
++    /// Returns whether this error corresponds to CURLE_REMOTE_ACCESS_DENIED.
++    pub fn is_remote_access_denied(&self) -> bool {
++        self.code == curl_sys::CURLE_REMOTE_ACCESS_DENIED
++    }
++
++    /// Returns whether this error corresponds to CURLE_PARTIAL_FILE.
++    pub fn is_partial_file(&self) -> bool {
++        self.code == curl_sys::CURLE_PARTIAL_FILE
++    }
++
++    /// Returns whether this error corresponds to CURLE_QUOTE_ERROR.
++    pub fn is_quote_error(&self) -> bool {
++        self.code == curl_sys::CURLE_QUOTE_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_HTTP_RETURNED_ERROR.
++    pub fn is_http_returned_error(&self) -> bool {
++        self.code == curl_sys::CURLE_HTTP_RETURNED_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_READ_ERROR.
++    pub fn is_read_error(&self) -> bool {
++        self.code == curl_sys::CURLE_READ_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_WRITE_ERROR.
++    pub fn is_write_error(&self) -> bool {
++        self.code == curl_sys::CURLE_WRITE_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_UPLOAD_FAILED.
++    pub fn is_upload_failed(&self) -> bool {
++        self.code == curl_sys::CURLE_UPLOAD_FAILED
++    }
++
++    /// Returns whether this error corresponds to CURLE_OUT_OF_MEMORY.
++    pub fn is_out_of_memory(&self) -> bool {
++        self.code == curl_sys::CURLE_OUT_OF_MEMORY
++    }
++
++    /// Returns whether this error corresponds to CURLE_OPERATION_TIMEDOUT.
++    pub fn is_operation_timedout(&self) -> bool {
++        self.code == curl_sys::CURLE_OPERATION_TIMEDOUT
++    }
++
++    /// Returns whether this error corresponds to CURLE_RANGE_ERROR.
++    pub fn is_range_error(&self) -> bool {
++        self.code == curl_sys::CURLE_RANGE_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_HTTP_POST_ERROR.
++    pub fn is_http_post_error(&self) -> bool {
++        self.code == curl_sys::CURLE_HTTP_POST_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_CONNECT_ERROR.
++    pub fn is_ssl_connect_error(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_CONNECT_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_BAD_DOWNLOAD_RESUME.
++    pub fn is_bad_download_resume(&self) -> bool {
++        self.code == curl_sys::CURLE_BAD_DOWNLOAD_RESUME
++    }
++
++    /// Returns whether this error corresponds to CURLE_FILE_COULDNT_READ_FILE.
++    pub fn is_file_couldnt_read_file(&self) -> bool {
++        self.code == curl_sys::CURLE_FILE_COULDNT_READ_FILE
++    }
++
++    /// Returns whether this error corresponds to CURLE_FUNCTION_NOT_FOUND.
++    pub fn is_function_not_found(&self) -> bool {
++        self.code == curl_sys::CURLE_FUNCTION_NOT_FOUND
++    }
++
++    /// Returns whether this error corresponds to CURLE_ABORTED_BY_CALLBACK.
++    pub fn is_aborted_by_callback(&self) -> bool {
++        self.code == curl_sys::CURLE_ABORTED_BY_CALLBACK
++    }
++
++    /// Returns whether this error corresponds to CURLE_BAD_FUNCTION_ARGUMENT.
++    pub fn is_bad_function_argument(&self) -> bool {
++        self.code == curl_sys::CURLE_BAD_FUNCTION_ARGUMENT
++    }
++
++    /// Returns whether this error corresponds to CURLE_INTERFACE_FAILED.
++    pub fn is_interface_failed(&self) -> bool {
++        self.code == curl_sys::CURLE_INTERFACE_FAILED
++    }
++
++    /// Returns whether this error corresponds to CURLE_TOO_MANY_REDIRECTS.
++    pub fn is_too_many_redirects(&self) -> bool {
++        self.code == curl_sys::CURLE_TOO_MANY_REDIRECTS
++    }
++
++    /// Returns whether this error corresponds to CURLE_UNKNOWN_OPTION.
++    pub fn is_unknown_option(&self) -> bool {
++        self.code == curl_sys::CURLE_UNKNOWN_OPTION
++    }
++
++    /// Returns whether this error corresponds to CURLE_PEER_FAILED_VERIFICATION.
++    pub fn is_peer_failed_verification(&self) -> bool {
++        self.code == curl_sys::CURLE_PEER_FAILED_VERIFICATION
++    }
++
++    /// Returns whether this error corresponds to CURLE_GOT_NOTHING.
++    pub fn is_got_nothing(&self) -> bool {
++        self.code == curl_sys::CURLE_GOT_NOTHING
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_ENGINE_NOTFOUND.
++    pub fn is_ssl_engine_notfound(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_ENGINE_NOTFOUND
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_ENGINE_SETFAILED.
++    pub fn is_ssl_engine_setfailed(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_ENGINE_SETFAILED
++    }
++
++    /// Returns whether this error corresponds to CURLE_SEND_ERROR.
++    pub fn is_send_error(&self) -> bool {
++        self.code == curl_sys::CURLE_SEND_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_RECV_ERROR.
++    pub fn is_recv_error(&self) -> bool {
++        self.code == curl_sys::CURLE_RECV_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_CERTPROBLEM.
++    pub fn is_ssl_certproblem(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_CERTPROBLEM
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_CIPHER.
++    pub fn is_ssl_cipher(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_CIPHER
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_CACERT.
++    pub fn is_ssl_cacert(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_CACERT
++    }
++
++    /// Returns whether this error corresponds to CURLE_BAD_CONTENT_ENCODING.
++    pub fn is_bad_content_encoding(&self) -> bool {
++        self.code == curl_sys::CURLE_BAD_CONTENT_ENCODING
++    }
++
++    /// Returns whether this error corresponds to CURLE_FILESIZE_EXCEEDED.
++    pub fn is_filesize_exceeded(&self) -> bool {
++        self.code == curl_sys::CURLE_FILESIZE_EXCEEDED
++    }
++
++    /// Returns whether this error corresponds to CURLE_USE_SSL_FAILED.
++    pub fn is_use_ssl_failed(&self) -> bool {
++        self.code == curl_sys::CURLE_USE_SSL_FAILED
++    }
++
++    /// Returns whether this error corresponds to CURLE_SEND_FAIL_REWIND.
++    pub fn is_send_fail_rewind(&self) -> bool {
++        self.code == curl_sys::CURLE_SEND_FAIL_REWIND
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_ENGINE_INITFAILED.
++    pub fn is_ssl_engine_initfailed(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_ENGINE_INITFAILED
++    }
++
++    /// Returns whether this error corresponds to CURLE_LOGIN_DENIED.
++    pub fn is_login_denied(&self) -> bool {
++        self.code == curl_sys::CURLE_LOGIN_DENIED
++    }
++
++    /// Returns whether this error corresponds to CURLE_CONV_FAILED.
++    pub fn is_conv_failed(&self) -> bool {
++        self.code == curl_sys::CURLE_CONV_FAILED
++    }
++
++    /// Returns whether this error corresponds to CURLE_CONV_REQD.
++    pub fn is_conv_required(&self) -> bool {
++        self.code == curl_sys::CURLE_CONV_REQD
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_CACERT_BADFILE.
++    pub fn is_ssl_cacert_badfile(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_CACERT_BADFILE
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_CRL_BADFILE.
++    pub fn is_ssl_crl_badfile(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_CRL_BADFILE
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_SHUTDOWN_FAILED.
++    pub fn is_ssl_shutdown_failed(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_SHUTDOWN_FAILED
++    }
++
++    /// Returns whether this error corresponds to CURLE_AGAIN.
++    pub fn is_again(&self) -> bool {
++        self.code == curl_sys::CURLE_AGAIN
++    }
++
++    /// Returns whether this error corresponds to CURLE_SSL_ISSUER_ERROR.
++    pub fn is_ssl_issuer_error(&self) -> bool {
++        self.code == curl_sys::CURLE_SSL_ISSUER_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLE_CHUNK_FAILED.
++    pub fn is_chunk_failed(&self) -> bool {
++        self.code == curl_sys::CURLE_CHUNK_FAILED
++    }
++
++    // /// Returns whether this error corresponds to CURLE_NO_CONNECTION_AVAILABLE.
++    // pub fn is_no_connection_available(&self) -> bool {
++    //     self.code == curl_sys::CURLE_NO_CONNECTION_AVAILABLE
++    // }
++
++    /// Returns the value of the underlying error corresponding to libcurl.
++    pub fn code(&self) -> curl_sys::CURLcode {
++        self.code
++    }
++
++    /// Returns the extra description of this error, if any is available.
++    pub fn extra_description(&self) -> Option<&str> {
++        self.extra.as_ref().map(|s| &**s)
++    }
++}
++
++impl fmt::Display for Error {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        let desc = error::Error::description(self);
++        match self.extra {
++            Some(ref s) => write!(f, "[{}] {} ({})", self.code(), desc, s),
++            None => write!(f, "[{}] {}", self.code(), desc),
++        }
++    }
++}
++
++impl fmt::Debug for Error {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_struct("Error")
++         .field("description", &error::Error::description(self))
++         .field("code", &self.code)
++         .field("extra", &self.extra)
++         .finish()
++    }
++}
++
++impl error::Error for Error {
++    fn description(&self) -> &str {
++        unsafe {
++            let s = curl_sys::curl_easy_strerror(self.code);
++            assert!(!s.is_null());
++            str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap()
++        }
++    }
++}
++
++/// An error returned from "share" operations.
++///
++/// This structure wraps a `CURLSHcode`.
++#[derive(Clone, PartialEq)]
++pub struct ShareError {
++    code: curl_sys::CURLSHcode,
++}
++
++impl ShareError {
++    /// Creates a new error from the underlying code returned by libcurl.
++    pub fn new(code: curl_sys::CURLSHcode) -> ShareError {
++        ShareError { code: code }
++    }
++
++    /// Returns whether this error corresponds to CURLSHE_BAD_OPTION.
++    pub fn is_bad_option(&self) -> bool {
++        self.code == curl_sys::CURLSHE_BAD_OPTION
++    }
++
++    /// Returns whether this error corresponds to CURLSHE_IN_USE.
++    pub fn is_in_use(&self) -> bool {
++        self.code == curl_sys::CURLSHE_IN_USE
++    }
++
++    /// Returns whether this error corresponds to CURLSHE_INVALID.
++    pub fn is_invalid(&self) -> bool {
++        self.code == curl_sys::CURLSHE_INVALID
++    }
++
++    /// Returns whether this error corresponds to CURLSHE_NOMEM.
++    pub fn is_nomem(&self) -> bool {
++        self.code == curl_sys::CURLSHE_NOMEM
++    }
++
++    // /// Returns whether this error corresponds to CURLSHE_NOT_BUILT_IN.
++    // pub fn is_not_built_in(&self) -> bool {
++    //     self.code == curl_sys::CURLSHE_NOT_BUILT_IN
++    // }
++
++    /// Returns the value of the underlying error corresponding to libcurl.
++    pub fn code(&self) -> curl_sys::CURLSHcode {
++        self.code
++    }
++}
++
++impl fmt::Display for ShareError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        error::Error::description(self).fmt(f)
++    }
++}
++
++impl fmt::Debug for ShareError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        write!(f, "ShareError {{ description: {:?}, code: {} }}",
++               error::Error::description(self),
++               self.code)
++    }
++}
++
++impl error::Error for ShareError {
++    fn description(&self) -> &str {
++        unsafe {
++            let s = curl_sys::curl_share_strerror(self.code);
++            assert!(!s.is_null());
++            str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap()
++        }
++    }
++}
++
++/// An error from "multi" operations.
++///
++/// This structure wraps a `CURLMcode`.
++#[derive(Clone, PartialEq)]
++pub struct MultiError {
++    code: curl_sys::CURLMcode,
++}
++
++impl MultiError {
++    /// Creates a new error from the underlying code returned by libcurl.
++    pub fn new(code: curl_sys::CURLMcode) -> MultiError {
++        MultiError { code: code }
++    }
++
++    /// Returns whether this error corresponds to CURLM_BAD_HANDLE.
++    pub fn is_bad_handle(&self) -> bool {
++        self.code == curl_sys::CURLM_BAD_HANDLE
++    }
++
++    /// Returns whether this error corresponds to CURLM_BAD_EASY_HANDLE.
++    pub fn is_bad_easy_handle(&self) -> bool {
++        self.code == curl_sys::CURLM_BAD_EASY_HANDLE
++    }
++
++    /// Returns whether this error corresponds to CURLM_OUT_OF_MEMORY.
++    pub fn is_out_of_memory(&self) -> bool {
++        self.code == curl_sys::CURLM_OUT_OF_MEMORY
++    }
++
++    /// Returns whether this error corresponds to CURLM_INTERNAL_ERROR.
++    pub fn is_internal_error(&self) -> bool {
++        self.code == curl_sys::CURLM_INTERNAL_ERROR
++    }
++
++    /// Returns whether this error corresponds to CURLM_BAD_SOCKET.
++    pub fn is_bad_socket(&self) -> bool {
++        self.code == curl_sys::CURLM_BAD_SOCKET
++    }
++
++    /// Returns whether this error corresponds to CURLM_UNKNOWN_OPTION.
++    pub fn is_unknown_option(&self) -> bool {
++        self.code == curl_sys::CURLM_UNKNOWN_OPTION
++    }
++
++    /// Returns whether this error corresponds to CURLM_CALL_MULTI_PERFORM.
++    pub fn is_call_perform(&self) -> bool {
++        self.code == curl_sys::CURLM_CALL_MULTI_PERFORM
++    }
++
++    // /// Returns whether this error corresponds to CURLM_ADDED_ALREADY.
++    // pub fn is_added_already(&self) -> bool {
++    //     self.code == curl_sys::CURLM_ADDED_ALREADY
++    // }
++
++    /// Returns the value of the underlying error corresponding to libcurl.
++    pub fn code(&self) -> curl_sys::CURLMcode {
++        self.code
++    }
++}
++
++impl fmt::Display for MultiError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        error::Error::description(self).fmt(f)
++    }
++}
++
++impl fmt::Debug for MultiError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        write!(f, "MultiError {{ description: {:?}, code: {} }}",
++               error::Error::description(self),
++               self.code)
++    }
++}
++
++impl error::Error for MultiError {
++    fn description(&self) -> &str {
++        unsafe {
++            let s = curl_sys::curl_multi_strerror(self.code);
++            assert!(!s.is_null());
++            str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap()
++        }
++    }
++}
++
++
++/// An error from "form add" operations.
++///
++/// This structure wraps a `CURLFORMcode`.
++#[derive(Clone, PartialEq)]
++pub struct FormError {
++    code: curl_sys::CURLFORMcode,
++}
++
++impl FormError {
++    /// Creates a new error from the underlying code returned by libcurl.
++    pub fn new(code: curl_sys::CURLFORMcode) -> FormError {
++        FormError { code: code }
++    }
++
++    /// Returns whether this error corresponds to CURL_FORMADD_MEMORY.
++    pub fn is_memory(&self) -> bool {
++        self.code == curl_sys::CURL_FORMADD_MEMORY
++    }
++
++    /// Returns whether this error corresponds to CURL_FORMADD_OPTION_TWICE.
++    pub fn is_option_twice(&self) -> bool {
++        self.code == curl_sys::CURL_FORMADD_OPTION_TWICE
++    }
++
++    /// Returns whether this error corresponds to CURL_FORMADD_NULL.
++    pub fn is_null(&self) -> bool {
++        self.code == curl_sys::CURL_FORMADD_NULL
++    }
++
++    /// Returns whether this error corresponds to CURL_FORMADD_UNKNOWN_OPTION.
++    pub fn is_unknown_option(&self) -> bool {
++        self.code == curl_sys::CURL_FORMADD_UNKNOWN_OPTION
++    }
++
++    /// Returns whether this error corresponds to CURL_FORMADD_INCOMPLETE.
++    pub fn is_incomplete(&self) -> bool {
++        self.code == curl_sys::CURL_FORMADD_INCOMPLETE
++    }
++
++    /// Returns whether this error corresponds to CURL_FORMADD_ILLEGAL_ARRAY.
++    pub fn is_illegal_array(&self) -> bool {
++        self.code == curl_sys::CURL_FORMADD_ILLEGAL_ARRAY
++    }
++
++    /// Returns whether this error corresponds to CURL_FORMADD_DISABLED.
++    pub fn is_disabled(&self) -> bool {
++        self.code == curl_sys::CURL_FORMADD_DISABLED
++    }
++
++    /// Returns the value of the underlying error corresponding to libcurl.
++    pub fn code(&self) -> curl_sys::CURLFORMcode {
++        self.code
++    }
++}
++
++impl fmt::Display for FormError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        error::Error::description(self).fmt(f)
++    }
++}
++
++impl fmt::Debug for FormError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        write!(f, "FormError {{ description: {:?}, code: {} }}",
++               error::Error::description(self),
++               self.code)
++    }
++}
++
++impl error::Error for FormError {
++    fn description(&self) -> &str {
++        match self.code {
++            curl_sys::CURL_FORMADD_MEMORY => "allocation failure",
++            curl_sys::CURL_FORMADD_OPTION_TWICE => "one option passed twice",
++            curl_sys::CURL_FORMADD_NULL => "null pointer given for string",
++            curl_sys::CURL_FORMADD_UNKNOWN_OPTION => "unknown option",
++            curl_sys::CURL_FORMADD_INCOMPLETE => "form information not complete",
++            curl_sys::CURL_FORMADD_ILLEGAL_ARRAY => "illegal array in option",
++            curl_sys::CURL_FORMADD_DISABLED => {
++                "libcurl does not have support for this option compiled in"
++            }
++            _ => "unknown form error",
++        }
++    }
++}
++
++impl From<ffi::NulError> for Error {
++    fn from(_: ffi::NulError) -> Error {
++        Error { code: curl_sys::CURLE_CONV_FAILED, extra: None }
++    }
++}
++
++impl From<Error> for io::Error {
++    fn from(e: Error) -> io::Error {
++        io::Error::new(io::ErrorKind::Other, e)
++    }
++}
++
++impl From<ShareError> for io::Error {
++    fn from(e: ShareError) -> io::Error {
++        io::Error::new(io::ErrorKind::Other, e)
++    }
++}
++
++impl From<MultiError> for io::Error {
++    fn from(e: MultiError) -> io::Error {
++        io::Error::new(io::ErrorKind::Other, e)
++    }
++}
++
++impl From<FormError> for io::Error {
++    fn from(e: FormError) -> io::Error {
++        io::Error::new(io::ErrorKind::Other, e)
++    }
++}
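++
++// Illustrative sketch (not part of the upstream sources): the `From`
++// conversions above let curl errors propagate through `io::Result` contexts
++// with `try!`/`?`:
++//
++//     fn fetch() -> std::io::Result<()> {
++//         let mut easy = curl::easy::Easy::new();
++//         try!(easy.url("https://example.com/"));
++//         try!(easy.perform());
++//         Ok(())
++//     }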
diff --cc vendor/curl-0.4.14/src/lib.rs
index 000000000,000000000..c1d9ef0ad
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/lib.rs
@@@ -1,0 -1,0 +1,129 @@@
++//! Rust bindings to the libcurl C library
++//!
++//! This crate contains bindings for an HTTP/HTTPS client which is powered by
++//! [libcurl], the same library behind the `curl` command line tool. The API
++//! currently closely matches that of libcurl itself, except that a Rustic
++//! layer of safety is applied on top.
++//!
++//! [libcurl]: https://curl.haxx.se/libcurl/
++//!
++//! # The "Easy" API
++//!
++//! The easiest way to send a request is to use the `Easy` API which
++//! corresponds to `CURL` in libcurl. This handle supports a wide variety of
++//! options and can be used to make a single blocking request in a thread.
++//! Callbacks can be specified to deal with data as it arrives and a handle
++//! can be reused to cache connections and such.
++//!
++//! ```rust,no_run
++//! use std::io::{stdout, Write};
++//!
++//! use curl::easy::Easy;
++//!
++//! // Write the contents of rust-lang.org to stdout
++//! let mut easy = Easy::new();
++//! easy.url("https://www.rust-lang.org/").unwrap();
++//! easy.write_function(|data| {
++//!     stdout().write_all(data).unwrap();
++//!     Ok(data.len())
++//! }).unwrap();
++//! easy.perform().unwrap();
++//! ```
++//!
++//! # What about multiple concurrent HTTP requests?
++//!
++//! One option you have currently is to send multiple requests in multiple
++//! threads, but otherwise libcurl has a "multi" interface for doing this
++//! operation. Initial bindings of this interface can be found in the `multi`
++//! module, but feedback is welcome!
++//!
++//! # Where does libcurl come from?
++//!
++//! This crate links to the `curl-sys` crate which is in turn responsible for
++//! acquiring and linking to the libcurl library. Currently this crate will
++//! build libcurl from source if one is not already detected on the system.
++//!
++//! There is a large number of releases for libcurl, all with different sets of
++//! capabilities. Robust programs may wish to inspect `Version::get()` to test
++//! what features are implemented in the linked build of libcurl at runtime.
++
++#![deny(missing_docs, missing_debug_implementations)]
++#![doc(html_root_url = "https://docs.rs/curl/0.4")]
++
++extern crate curl_sys;
++extern crate libc;
++extern crate socket2;
++
++#[cfg(all(unix, not(target_os = "macos")))]
++extern crate openssl_sys;
++#[cfg(all(unix, not(target_os = "macos")))]
++extern crate openssl_probe;
++#[cfg(windows)]
++extern crate winapi;
++
++#[cfg(target_env = "msvc")]
++extern crate kernel32;
++#[cfg(target_env = "msvc")]
++extern crate schannel;
++
++use std::ffi::CStr;
++use std::str;
++use std::sync::{Once, ONCE_INIT};
++
++pub use error::{Error, ShareError, MultiError, FormError};
++mod error;
++
++pub use version::{Version, Protocols};
++mod version;
++
++mod panic;
++pub mod easy;
++pub mod multi;
++
++/// Initializes the underlying libcurl library.
++///
++/// It's not required to call this before the library is used, but it's
++/// recommended to do so as soon as the program starts.
++pub fn init() {
++    static INIT: Once = ONCE_INIT;
++    INIT.call_once(|| {
++        platform_init();
++        unsafe {
++            assert_eq!(curl_sys::curl_global_init(curl_sys::CURL_GLOBAL_ALL), 0);
++        }
++
++        // Note that we explicitly don't schedule a call to
++        // `curl_global_cleanup`. The documentation for that function says
++        //
++        // > You must not call it when any other thread in the program (i.e. a
++        // > thread sharing the same memory) is running. This doesn't just mean
++        // > no other thread that is using libcurl.
++        //
++        // We can't ever be sure of that, so unfortunately we can't call the
++        // function.
++    });
++
++    #[cfg(all(unix, not(target_os = "macos")))]
++    fn platform_init() {
++        openssl_sys::init();
++    }
++
++    #[cfg(not(all(unix, not(target_os = "macos"))))]
++    fn platform_init() {}
++}
++
++unsafe fn opt_str<'a>(ptr: *const libc::c_char) -> Option<&'a str> {
++    if ptr.is_null() {
++        None
++    } else {
++        Some(str::from_utf8(CStr::from_ptr(ptr).to_bytes()).unwrap())
++    }
++}
++
++fn cvt(r: curl_sys::CURLcode) -> Result<(), Error> {
++    if r == curl_sys::CURLE_OK {
++        Ok(())
++    } else {
++        Err(Error::new(r))
++    }
++}
diff --cc vendor/curl-0.4.14/src/multi.rs
index 000000000,000000000..0de52c373
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/multi.rs
@@@ -1,0 -1,0 +1,973 @@@
++//! Multi - initiating multiple requests simultaneously
++
++use std::fmt;
++use std::marker;
++use std::time::Duration;
++
++use libc::{c_int, c_char, c_void, c_long, c_short};
++use curl_sys;
++
++#[cfg(windows)]
++use winapi::winsock2::fd_set;
++#[cfg(unix)]
++use libc::{fd_set, pollfd, POLLIN, POLLPRI, POLLOUT};
++
++use {MultiError, Error};
++use easy::{Easy, Easy2};
++use panic;
++
++/// A multi handle for initiating multiple connections simultaneously.
++///
++/// This structure corresponds to `CURLM` in libcurl and provides the ability to
++/// have multiple transfers in flight simultaneously. This handle is then used
++/// to manage each transfer. The main purpose of a `CURLM` is for the
++/// *application* to drive the I/O rather than libcurl itself doing all the
++/// blocking. Methods like `action` allow the application to inform libcurl of
++/// when events have happened.
++///
++/// Lots more documentation can be found on the libcurl [multi tutorial] where
++/// the APIs correspond pretty closely with this crate.
++///
++/// [multi tutorial]: https://curl.haxx.se/libcurl/c/libcurl-multi.html
++pub struct Multi {
++    raw: *mut curl_sys::CURLM,
++    data: Box<MultiData>,
++}
++
++struct MultiData {
++    socket: Box<FnMut(Socket, SocketEvents, usize) + Send>,
++    timer: Box<FnMut(Option<Duration>) -> bool + Send>,
++}
++
++/// Message from the `messages` function of a multi handle.
++///
++/// Currently only indicates whether a transfer is done.
++pub struct Message<'multi> {
++    ptr: *mut curl_sys::CURLMsg,
++    _multi: &'multi Multi,
++}
++
++/// Wrapper around an easy handle while it's owned by a multi handle.
++///
++/// Once an easy handle has been added to a multi handle then it can no longer
++/// be used via `perform`. This handle is also used to remove the easy handle
++/// from the multi handle when desired.
++pub struct EasyHandle {
++    easy: Easy,
++    // This is now effectively bound to a `Multi`, so it is no longer sendable.
++    _marker: marker::PhantomData<&'static Multi>,
++}
++
++/// Wrapper around an `Easy2` handle while it's owned by a multi handle.
++///
++/// Once an easy handle has been added to a multi handle then it can no longer
++/// be used via `perform`. This handle is also used to remove the easy handle
++/// from the multi handle when desired.
++pub struct Easy2Handle<H> {
++    easy: Easy2<H>,
++    // This is now effectively bound to a `Multi`, so it is no longer sendable.
++    _marker: marker::PhantomData<&'static Multi>,
++}
++
++/// Notification of the events that have happened on a socket.
++///
++/// This type is passed as an argument to the `action` method on a multi handle
++/// to indicate what events have occurred on a socket.
++pub struct Events {
++    bits: c_int,
++}
++
++/// Notification of events that are requested on a socket. 
++/// ++/// This type is yielded to the `socket_function` callback to indicate what ++/// events are requested on a socket. ++pub struct SocketEvents { ++ bits: c_int, ++} ++ ++/// Raw underlying socket type that the multi handles use ++pub type Socket = curl_sys::curl_socket_t; ++ ++/// File descriptor to wait on for use with the `wait` method on a multi handle. ++pub struct WaitFd { ++ inner: curl_sys::curl_waitfd, ++} ++ ++impl Multi { ++ /// Creates a new multi session through which multiple HTTP transfers can be ++ /// initiated. ++ pub fn new() -> Multi { ++ unsafe { ++ ::init(); ++ let ptr = curl_sys::curl_multi_init(); ++ assert!(!ptr.is_null()); ++ Multi { ++ raw: ptr, ++ data: Box::new(MultiData { ++ socket: Box::new(|_, _, _| ()), ++ timer: Box::new(|_| true), ++ }), ++ } ++ } ++ } ++ ++ /// Set the callback informed about what to wait for ++ /// ++ /// When the `action` function runs, it informs the application about ++ /// updates in the socket (file descriptor) status by doing none, one, or ++ /// multiple calls to the socket callback. The callback gets status updates ++ /// with changes since the previous time the callback was called. See ++ /// `action` for more details on how the callback is used and should work. ++ /// ++ /// The `SocketEvents` parameter informs the callback on the status of the ++ /// given socket, and the methods on that type can be used to learn about ++ /// what's going on with the socket. ++ /// ++ /// The third `usize` parameter is a custom value set by the `assign` method ++ /// below. ++ pub fn socket_function(&mut self, f: F) -> Result<(), MultiError> ++ where F: FnMut(Socket, SocketEvents, usize) + Send + 'static, ++ { ++ self._socket_function(Box::new(f)) ++ } ++ ++ fn _socket_function(&mut self, ++ f: Box) ++ -> Result<(), MultiError> ++ { ++ self.data.socket = f; ++ let cb: curl_sys::curl_socket_callback = cb; ++ try!(self.setopt_ptr(curl_sys::CURLMOPT_SOCKETFUNCTION, ++ cb as usize as *const c_char)); ++ let ptr = &*self.data as *const _; ++ try!(self.setopt_ptr(curl_sys::CURLMOPT_SOCKETDATA, ++ ptr as *const c_char)); ++ return Ok(()); ++ ++ // TODO: figure out how to expose `_easy` ++ extern fn cb(_easy: *mut curl_sys::CURL, ++ socket: curl_sys::curl_socket_t, ++ what: c_int, ++ userptr: *mut c_void, ++ socketp: *mut c_void) -> c_int { ++ panic::catch(|| unsafe { ++ let f = &mut (*(userptr as *mut MultiData)).socket; ++ f(socket, SocketEvents { bits: what }, socketp as usize) ++ }); ++ 0 ++ } ++ } ++ ++ /// Set data to associate with an internal socket ++ /// ++ /// This function creates an association in the multi handle between the ++ /// given socket and a private token of the application. This is designed ++ /// for `action` uses. ++ /// ++ /// When set, the token will be passed to all future socket callbacks for ++ /// the specified socket. ++ /// ++ /// If the given socket isn't already in use by libcurl, this function will ++ /// return an error. ++ /// ++ /// libcurl only keeps one single token associated with a socket, so ++ /// calling this function several times for the same socket will make the ++ /// last set token get used. ++ /// ++ /// The idea here being that this association (socket to token) is something ++ /// that just about every application that uses this API will need and then ++ /// libcurl can just as well do it since it already has an internal hash ++ /// table lookup for this. 
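++    ///
++    /// # Example
++    ///
++    /// A sketch of the intended flow; the second call is commented out
++    /// because a live `socket` value only exists once a transfer is running
++    /// and the socket callback has reported it:
++    ///
++    /// ```no_run
++    /// use curl::multi::Multi;
++    ///
++    /// let mut multi = Multi::new();
++    /// multi.socket_function(|socket, _events, token| {
++    ///     // `token` stays 0 for a socket until `assign` is called for it.
++    ///     println!("activity on socket {}, token {}", socket, token);
++    /// }).unwrap();
++    /// // Later, from within the event loop:
++    /// // multi.assign(socket, 42).unwrap();
++    /// ```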
++ /// ++ /// # Typical Usage ++ /// ++ /// In a typical application you allocate a struct or at least use some kind ++ /// of semi-dynamic data for each socket that we must wait for action on ++ /// when using the `action` approach. ++ /// ++ /// When our socket-callback gets called by libcurl and we get to know about ++ /// yet another socket to wait for, we can use `assign` to point out the ++ /// particular data so that when we get updates about this same socket ++ /// again, we don't have to find the struct associated with this socket by ++ /// ourselves. ++ pub fn assign(&self, ++ socket: Socket, ++ token: usize) -> Result<(), MultiError> { ++ unsafe { ++ try!(cvt(curl_sys::curl_multi_assign(self.raw, socket, ++ token as *mut _))); ++ Ok(()) ++ } ++ } ++ ++ /// Set callback to receive timeout values ++ /// ++ /// Certain features, such as timeouts and retries, require you to call ++ /// libcurl even when there is no activity on the file descriptors. ++ /// ++ /// Your callback function should install a non-repeating timer with the ++ /// interval specified. Each time that timer fires, call either `action` or ++ /// `perform`, depending on which interface you use. ++ /// ++ /// A timeout value of `None` means you should delete your timer. ++ /// ++ /// A timeout value of 0 means you should call `action` or `perform` (once) ++ /// as soon as possible. ++ /// ++ /// This callback will only be called when the timeout changes. ++ /// ++ /// The timer callback should return `true` on success, and `false` on ++ /// error. This callback can be used instead of, or in addition to, ++ /// `get_timeout`. ++ pub fn timer_function(&mut self, f: F) -> Result<(), MultiError> ++ where F: FnMut(Option) -> bool + Send + 'static, ++ { ++ self._timer_function(Box::new(f)) ++ } ++ ++ fn _timer_function(&mut self, ++ f: Box) -> bool + Send>) ++ -> Result<(), MultiError> ++ { ++ self.data.timer = f; ++ let cb: curl_sys::curl_multi_timer_callback = cb; ++ try!(self.setopt_ptr(curl_sys::CURLMOPT_TIMERFUNCTION, ++ cb as usize as *const c_char)); ++ let ptr = &*self.data as *const _; ++ try!(self.setopt_ptr(curl_sys::CURLMOPT_TIMERDATA, ++ ptr as *const c_char)); ++ return Ok(()); ++ ++ // TODO: figure out how to expose `_multi` ++ extern fn cb(_multi: *mut curl_sys::CURLM, ++ timeout_ms: c_long, ++ user: *mut c_void) -> c_int { ++ let keep_going = panic::catch(|| unsafe { ++ let f = &mut (*(user as *mut MultiData)).timer; ++ if timeout_ms == -1 { ++ f(None) ++ } else { ++ f(Some(Duration::from_millis(timeout_ms as u64))) ++ } ++ }).unwrap_or(false); ++ if keep_going {0} else {-1} ++ } ++ } ++ ++ /// Enable or disable HTTP pipelining and multiplexing. ++ /// ++ /// When http_1 is true, enable HTTP/1.1 pipelining, which means that if ++ /// you add a second request that can use an already existing connection, ++ /// the second request will be "piped" on the same connection rather than ++ /// being executed in parallel. ++ /// ++ /// When multiplex is true, enable HTTP/2 multiplexing, which means that ++ /// follow-up requests can re-use an existing connection and send the new ++ /// request multiplexed over that at the same time as other transfers are ++ /// already using that single connection. 
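++    ///
++    /// # Example
++    ///
++    /// A minimal sketch enabling only HTTP/2 multiplexing, which is the
++    /// usual modern configuration:
++    ///
++    /// ```no_run
++    /// use curl::multi::Multi;
++    ///
++    /// let mut multi = Multi::new();
++    /// // No HTTP/1.1 pipelining, but multiplex over HTTP/2 when possible.
++    /// multi.pipelining(false, true).unwrap();
++    /// ```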
++    pub fn pipelining(&mut self, http_1: bool, multiplex: bool) -> Result<(), MultiError> {
++        let bitmask = if http_1 { curl_sys::CURLPIPE_HTTP1 } else { 0 } | if multiplex { curl_sys::CURLPIPE_MULTIPLEX } else { 0 };
++        self.setopt_long(curl_sys::CURLMOPT_PIPELINING, bitmask)
++    }
++
++    /// Sets the max number of connections to a single host.
++    ///
++    /// Pass a long to indicate the max number of simultaneously open connections
++    /// to a single host (a host being the same as a host name + port number pair).
++    /// For each new session to a host, libcurl will open up a new connection up to the
++    /// limit set by the provided value. When the limit is reached, the sessions will
++    /// be pending until a connection becomes available. If pipelining is enabled,
++    /// libcurl will try to pipeline if the host is capable of it.
++    pub fn set_max_host_connections(&mut self, val: usize) -> Result<(), MultiError> {
++        self.setopt_long(curl_sys::CURLMOPT_MAX_HOST_CONNECTIONS, val as c_long)
++    }
++
++    /// Sets the pipeline length.
++    ///
++    /// This sets the max number that will be used as the maximum amount of
++    /// outstanding requests in an HTTP/1.1 pipelined connection. This option
++    /// is only used for HTTP/1.1 pipelining, and not HTTP/2 multiplexing.
++    pub fn set_pipeline_length(&mut self, val: usize) -> Result<(), MultiError> {
++        self.setopt_long(curl_sys::CURLMOPT_MAX_PIPELINE_LENGTH, val as c_long)
++    }
++
++    fn setopt_long(&mut self,
++                   opt: curl_sys::CURLMoption,
++                   val: c_long) -> Result<(), MultiError> {
++        unsafe {
++            cvt(curl_sys::curl_multi_setopt(self.raw, opt, val))
++        }
++    }
++
++    fn setopt_ptr(&mut self,
++                  opt: curl_sys::CURLMoption,
++                  val: *const c_char) -> Result<(), MultiError> {
++        unsafe {
++            cvt(curl_sys::curl_multi_setopt(self.raw, opt, val))
++        }
++    }
++
++    /// Add an easy handle to a multi session
++    ///
++    /// Adds a standard easy handle to the multi stack. This function call will
++    /// make this multi handle control the specified easy handle.
++    ///
++    /// When an easy interface is added to a multi handle, it will use a shared
++    /// connection cache owned by the multi handle. Removing and adding new easy
++    /// handles will not affect the pool of connections or the ability to do
++    /// connection re-use.
++    ///
++    /// If you have `timer_function` set in the multi handle (and you really
++    /// should if you're working event-based with `action` and friends), that
++    /// callback will be called from within this function to ask for an updated
++    /// timer so that your main event loop will get the activity on this handle
++    /// to get started.
++    ///
++    /// The easy handle will remain added to the multi handle until you remove
++    /// it again with `remove` on the returned handle - even when a transfer
++    /// with that specific easy handle is completed.
++    pub fn add(&self, mut easy: Easy) -> Result<EasyHandle, MultiError> {
++        // Clear any configuration set by previous transfers because we're
++        // moving this into a `Send+'static` situation now basically.
++        easy.transfer();
++
++        unsafe {
++            try!(cvt(curl_sys::curl_multi_add_handle(self.raw, easy.raw())));
++        }
++        Ok(EasyHandle {
++            easy: easy,
++            _marker: marker::PhantomData,
++        })
++    }
++
++    /// Same as `add`, but works with the `Easy2` type. 
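++    ///
++    /// # Example
++    ///
++    /// A short sketch, assuming a `Collector` type implementing the
++    /// `curl::easy::Handler` trait (the URL is illustrative):
++    ///
++    /// ```no_run
++    /// # use curl::easy::{Easy2, Handler, WriteError};
++    /// # use curl::multi::Multi;
++    /// # struct Collector(Vec<u8>);
++    /// # impl Handler for Collector {
++    /// #     fn write(&mut self, data: &[u8]) -> Result<usize, WriteError> {
++    /// #         self.0.extend_from_slice(data);
++    /// #         Ok(data.len())
++    /// #     }
++    /// # }
++    /// let multi = Multi::new();
++    /// let mut easy = Easy2::new(Collector(Vec::new()));
++    /// easy.url("https://www.rust-lang.org/").unwrap();
++    /// let handle = multi.add2(easy).unwrap();
++    /// ```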
++ pub fn add2(&self, easy: Easy2) -> Result, MultiError> { ++ unsafe { ++ try!(cvt(curl_sys::curl_multi_add_handle(self.raw, easy.raw()))); ++ } ++ Ok(Easy2Handle { ++ easy: easy, ++ _marker: marker::PhantomData, ++ }) ++ } ++ ++ /// Remove an easy handle from this multi session ++ /// ++ /// Removes the easy handle from this multi handle. This will make the ++ /// returned easy handle be removed from this multi handle's control. ++ /// ++ /// When the easy handle has been removed from a multi stack, it is again ++ /// perfectly legal to invoke `perform` on it. ++ /// ++ /// Removing an easy handle while being used is perfectly legal and will ++ /// effectively halt the transfer in progress involving that easy handle. ++ /// All other easy handles and transfers will remain unaffected. ++ pub fn remove(&self, easy: EasyHandle) -> Result { ++ unsafe { ++ try!(cvt(curl_sys::curl_multi_remove_handle(self.raw, ++ easy.easy.raw()))); ++ } ++ Ok(easy.easy) ++ } ++ ++ /// Same as `remove`, but for `Easy2Handle`. ++ pub fn remove2(&self, easy: Easy2Handle) -> Result, MultiError> { ++ unsafe { ++ try!(cvt(curl_sys::curl_multi_remove_handle(self.raw, ++ easy.easy.raw()))); ++ } ++ Ok(easy.easy) ++ } ++ ++ /// Read multi stack informationals ++ /// ++ /// Ask the multi handle if there are any messages/informationals from the ++ /// individual transfers. Messages may include informationals such as an ++ /// error code from the transfer or just the fact that a transfer is ++ /// completed. More details on these should be written down as well. ++ pub fn messages(&self, mut f: F) where F: FnMut(Message) { ++ self._messages(&mut f) ++ } ++ ++ fn _messages(&self, f: &mut FnMut(Message)) { ++ let mut queue = 0; ++ unsafe { ++ loop { ++ let ptr = curl_sys::curl_multi_info_read(self.raw, &mut queue); ++ if ptr.is_null() { ++ break ++ } ++ f(Message { ptr: ptr, _multi: self }) ++ } ++ } ++ } ++ ++ /// Inform of reads/writes available data given an action ++ /// ++ /// When the application has detected action on a socket handled by libcurl, ++ /// it should call this function with the sockfd argument set to ++ /// the socket with the action. When the events on a socket are known, they ++ /// can be passed `events`. When the events on a socket are unknown, pass ++ /// `Events::new()` instead, and libcurl will test the descriptor ++ /// internally. ++ /// ++ /// The returned integer will contain the number of running easy handles ++ /// within the multi handle. When this number reaches zero, all transfers ++ /// are complete/done. When you call `action` on a specific socket and the ++ /// counter decreases by one, it DOES NOT necessarily mean that this exact ++ /// socket/transfer is the one that completed. Use `messages` to figure out ++ /// which easy handle that completed. ++ /// ++ /// The `action` function informs the application about updates in the ++ /// socket (file descriptor) status by doing none, one, or multiple calls to ++ /// the socket callback function set with the `socket_function` method. They ++ /// update the status with changes since the previous time the callback was ++ /// called. ++ pub fn action(&self, socket: Socket, events: &Events) ++ -> Result { ++ let mut remaining = 0; ++ unsafe { ++ try!(cvt(curl_sys::curl_multi_socket_action(self.raw, ++ socket, ++ events.bits, ++ &mut remaining))); ++ Ok(remaining as u32) ++ } ++ } ++ ++ /// Inform libcurl that a timeout has expired and sockets should be tested. 
++ /// ++ /// The returned integer will contain the number of running easy handles ++ /// within the multi handle. When this number reaches zero, all transfers ++ /// are complete/done. When you call `action` on a specific socket and the ++ /// counter decreases by one, it DOES NOT necessarily mean that this exact ++ /// socket/transfer is the one that completed. Use `messages` to figure out ++ /// which easy handle that completed. ++ /// ++ /// Get the timeout time by calling the `timer_function` method. Your ++ /// application will then get called with information on how long to wait ++ /// for socket actions at most before doing the timeout action: call the ++ /// `timeout` method. You can also use the `get_timeout` function to ++ /// poll the value at any given time, but for an event-based system using ++ /// the callback is far better than relying on polling the timeout value. ++ pub fn timeout(&self) -> Result { ++ let mut remaining = 0; ++ unsafe { ++ try!(cvt(curl_sys::curl_multi_socket_action(self.raw, ++ curl_sys::CURL_SOCKET_BAD, ++ 0, ++ &mut remaining))); ++ Ok(remaining as u32) ++ } ++ } ++ ++ /// Get how long to wait for action before proceeding ++ /// ++ /// An application using the libcurl multi interface should call ++ /// `get_timeout` to figure out how long it should wait for socket actions - ++ /// at most - before proceeding. ++ /// ++ /// Proceeding means either doing the socket-style timeout action: call the ++ /// `timeout` function, or call `perform` if you're using the simpler and ++ /// older multi interface approach. ++ /// ++ /// The timeout value returned is the duration at this very moment. If 0, it ++ /// means you should proceed immediately without waiting for anything. If it ++ /// returns `None`, there's no timeout at all set. ++ /// ++ /// Note: if libcurl returns a `None` timeout here, it just means that ++ /// libcurl currently has no stored timeout value. You must not wait too ++ /// long (more than a few seconds perhaps) before you call `perform` again. ++ pub fn get_timeout(&self) -> Result, MultiError> { ++ let mut ms = 0; ++ unsafe { ++ try!(cvt(curl_sys::curl_multi_timeout(self.raw, &mut ms))); ++ if ms == -1 { ++ Ok(None) ++ } else { ++ Ok(Some(Duration::from_millis(ms as u64))) ++ } ++ } ++ } ++ ++ /// Block until activity is detected or a timeout passes. ++ /// ++ /// The timeout is used in millisecond-precision. Large durations are ++ /// clamped at the maximum value curl accepts. ++ /// ++ /// The returned integer will contain the number of internal file ++ /// descriptors on which interesting events occured. ++ /// ++ /// This function is a simpler alternative to using `fdset()` and `select()` ++ /// and does not suffer from file descriptor limits. ++ /// ++ /// # Example ++ /// ++ /// ``` ++ /// use curl::multi::Multi; ++ /// use std::time::Duration; ++ /// ++ /// let m = Multi::new(); ++ /// ++ /// // Add some Easy handles... ++ /// ++ /// while m.perform().unwrap() > 0 { ++ /// m.wait(&mut [], Duration::from_secs(1)).unwrap(); ++ /// } ++ /// ``` ++ pub fn wait(&self, waitfds: &mut [WaitFd], timeout: Duration) ++ -> Result { ++ let timeout_ms = { ++ let secs = timeout.as_secs(); ++ if secs > (i32::max_value() / 1000) as u64 { ++ // Duration too large, clamp at maximum value. 
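++                // curl_multi_wait takes its timeout as a C `int` of
++                // milliseconds, so i32::MAX (roughly 24.8 days) is the
++                // largest value we can pass down.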
++ i32::max_value() ++ } else { ++ secs as i32 * 1000 + timeout.subsec_nanos() as i32 / 1000_000 ++ } ++ }; ++ unsafe { ++ let mut ret = 0; ++ try!(cvt(curl_sys::curl_multi_wait(self.raw, ++ waitfds.as_mut_ptr() as *mut _, ++ waitfds.len() as u32, ++ timeout_ms, ++ &mut ret))); ++ Ok(ret as u32) ++ } ++ } ++ ++ /// Reads/writes available data from each easy handle. ++ /// ++ /// This function handles transfers on all the added handles that need ++ /// attention in an non-blocking fashion. ++ /// ++ /// When an application has found out there's data available for this handle ++ /// or a timeout has elapsed, the application should call this function to ++ /// read/write whatever there is to read or write right now etc. This ++ /// method returns as soon as the reads/writes are done. This function does ++ /// not require that there actually is any data available for reading or ++ /// that data can be written, it can be called just in case. It will return ++ /// the number of handles that still transfer data. ++ /// ++ /// If the amount of running handles is changed from the previous call (or ++ /// is less than the amount of easy handles you've added to the multi ++ /// handle), you know that there is one or more transfers less "running". ++ /// You can then call `info` to get information about each individual ++ /// completed transfer, and that returned info includes `Error` and more. ++ /// If an added handle fails very quickly, it may never be counted as a ++ /// running handle. ++ /// ++ /// When running_handles is set to zero (0) on the return of this function, ++ /// there is no longer any transfers in progress. ++ /// ++ /// # Return ++ /// ++ /// Before libcurl version 7.20.0: If you receive `is_call_perform`, this ++ /// basically means that you should call `perform` again, before you select ++ /// on more actions. You don't have to do it immediately, but the return ++ /// code means that libcurl may have more data available to return or that ++ /// there may be more data to send off before it is "satisfied". Do note ++ /// that `perform` will return `is_call_perform` only when it wants to be ++ /// called again immediately. When things are fine and there is nothing ++ /// immediate it wants done, it'll return `Ok` and you need to wait for ++ /// "action" and then call this function again. ++ /// ++ /// This function only returns errors etc regarding the whole multi stack. ++ /// Problems still might have occurred on individual transfers even when ++ /// this function returns `Ok`. Use `info` to figure out how individual ++ /// transfers did. ++ pub fn perform(&self) -> Result { ++ unsafe { ++ let mut ret = 0; ++ try!(cvt(curl_sys::curl_multi_perform(self.raw, &mut ret))); ++ Ok(ret as u32) ++ } ++ } ++ ++ /// Extracts file descriptor information from a multi handle ++ /// ++ /// This function extracts file descriptor information from a given ++ /// handle, and libcurl returns its `fd_set` sets. The application can use ++ /// these to `select()` on, but be sure to `FD_ZERO` them before calling ++ /// this function as curl_multi_fdset only adds its own descriptors, it ++ /// doesn't zero or otherwise remove any others. The curl_multi_perform ++ /// function should be called as soon as one of them is ready to be read ++ /// from or written to. ++ /// ++ /// If no file descriptors are set by libcurl, this function will return ++ /// `Ok(None)`. Otherwise `Ok(Some(n))` will be returned where `n` the ++ /// highest descriptor number libcurl set. 
When `Ok(None)` is returned it ++ /// is because libcurl currently does something that isn't possible for ++ /// your application to monitor with a socket and unfortunately you can ++ /// then not know exactly when the current action is completed using ++ /// `select()`. You then need to wait a while before you proceed and call ++ /// `perform` anyway. ++ /// ++ /// When doing `select()`, you should use `get_timeout` to figure out ++ /// how long to wait for action. Call `perform` even if no activity has ++ /// been seen on the `fd_set`s after the timeout expires as otherwise ++ /// internal retries and timeouts may not work as you'd think and want. ++ /// ++ /// If one of the sockets used by libcurl happens to be larger than what ++ /// can be set in an `fd_set`, which on POSIX systems means that the file ++ /// descriptor is larger than `FD_SETSIZE`, then libcurl will try to not ++ /// set it. Setting a too large file descriptor in an `fd_set` implies an out ++ /// of bounds write which can cause crashes, or worse. The effect of NOT ++ /// storing it will possibly save you from the crash, but will make your ++ /// program NOT wait for sockets it should wait for... ++ pub fn fdset2(&self, ++ read: Option<&mut curl_sys::fd_set>, ++ write: Option<&mut curl_sys::fd_set>, ++ except: Option<&mut curl_sys::fd_set>) -> Result, MultiError> { ++ unsafe { ++ let mut ret = 0; ++ let read = read.map(|r| r as *mut _).unwrap_or(0 as *mut _); ++ let write = write.map(|r| r as *mut _).unwrap_or(0 as *mut _); ++ let except = except.map(|r| r as *mut _).unwrap_or(0 as *mut _); ++ try!(cvt(curl_sys::curl_multi_fdset(self.raw, ++ read, ++ write, ++ except, ++ &mut ret))); ++ if ret == -1 { ++ Ok(None) ++ } else { ++ Ok(Some(ret)) ++ } ++ } ++ } ++ ++ #[doc(hidden)] ++ #[deprecated(note = "renamed to fdset2")] ++ pub fn fdset(&self, ++ read: Option<&mut fd_set>, ++ write: Option<&mut fd_set>, ++ except: Option<&mut fd_set>) -> Result, MultiError> { ++ unsafe { ++ let mut ret = 0; ++ let read = read.map(|r| r as *mut _).unwrap_or(0 as *mut _); ++ let write = write.map(|r| r as *mut _).unwrap_or(0 as *mut _); ++ let except = except.map(|r| r as *mut _).unwrap_or(0 as *mut _); ++ try!(cvt(curl_sys::curl_multi_fdset(self.raw, ++ read as *mut _, ++ write as *mut _, ++ except as *mut _, ++ &mut ret))); ++ if ret == -1 { ++ Ok(None) ++ } else { ++ Ok(Some(ret)) ++ } ++ } ++ } ++ ++ /// Attempt to close the multi handle and clean up all associated resources. ++ /// ++ /// Cleans up and removes a whole multi stack. It does not free or touch any ++ /// individual easy handles in any way - they still need to be closed ++ /// individually. ++ pub fn close(&self) -> Result<(), MultiError> { ++ unsafe { ++ cvt(curl_sys::curl_multi_cleanup(self.raw)) ++ } ++ } ++} ++ ++fn cvt(code: curl_sys::CURLMcode) -> Result<(), MultiError> { ++ if code == curl_sys::CURLM_OK { ++ Ok(()) ++ } else { ++ Err(MultiError::new(code)) ++ } ++} ++ ++impl fmt::Debug for Multi { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ f.debug_struct("Multi") ++ .field("raw", &self.raw) ++ .finish() ++ } ++} ++ ++impl Drop for Multi { ++ fn drop(&mut self) { ++ let _ = self.close(); ++ } ++} ++ ++impl EasyHandle { ++ /// Sets an internal private token for this `EasyHandle`. ++ /// ++ /// This function will set the `CURLOPT_PRIVATE` field on the underlying ++ /// easy handle. 
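++    ///
++    /// # Example
++    ///
++    /// A minimal sketch of tagging a transfer and recognizing it on
++    /// completion (the URL is illustrative and error handling is elided):
++    ///
++    /// ```no_run
++    /// use curl::easy::Easy;
++    /// use curl::multi::Multi;
++    ///
++    /// let multi = Multi::new();
++    /// let mut easy = Easy::new();
++    /// easy.url("https://www.rust-lang.org/").unwrap();
++    /// let mut handle = multi.add(easy).unwrap();
++    /// handle.set_token(42).unwrap();
++    /// // ... drive the transfer via `perform`/`wait`, then:
++    /// multi.messages(|msg| {
++    ///     assert_eq!(msg.token().unwrap(), 42);
++    /// });
++    /// ```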
++ pub fn set_token(&mut self, token: usize) -> Result<(), Error> { ++ unsafe { ++ ::cvt(curl_sys::curl_easy_setopt(self.easy.raw(), ++ curl_sys::CURLOPT_PRIVATE, ++ token)) ++ } ++ } ++} ++ ++impl fmt::Debug for EasyHandle { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ self.easy.fmt(f) ++ } ++} ++ ++impl Easy2Handle { ++ /// Acquires a reference to the underlying handler for events. ++ pub fn get_ref(&self) -> &H { ++ self.easy.get_ref() ++ } ++ ++ /// Acquires a reference to the underlying handler for events. ++ pub fn get_mut(&mut self) -> &mut H { ++ self.easy.get_mut() ++ } ++ ++ /// Same as `EasyHandle::set_token` ++ pub fn set_token(&mut self, token: usize) -> Result<(), Error> { ++ unsafe { ++ ::cvt(curl_sys::curl_easy_setopt(self.easy.raw(), ++ curl_sys::CURLOPT_PRIVATE, ++ token)) ++ } ++ } ++} ++ ++impl fmt::Debug for Easy2Handle { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ self.easy.fmt(f) ++ } ++} ++ ++impl<'multi> Message<'multi> { ++ /// If this message indicates that a transfer has finished, returns the ++ /// result of the transfer in `Some`. ++ /// ++ /// If the message doesn't indicate that a transfer has finished, then ++ /// `None` is returned. ++ pub fn result(&self) -> Option> { ++ unsafe { ++ if (*self.ptr).msg == curl_sys::CURLMSG_DONE { ++ Some(::cvt((*self.ptr).data as curl_sys::CURLcode)) ++ } else { ++ None ++ } ++ } ++ } ++ ++ /// Returns whether this easy message was for the specified easy handle or ++ /// not. ++ pub fn is_for(&self, handle: &EasyHandle) -> bool { ++ unsafe { (*self.ptr).easy_handle == handle.easy.raw() } ++ } ++ ++ /// Same as `is_for`, but for `Easy2Handle`. ++ pub fn is_for2(&self, handle: &Easy2Handle) -> bool { ++ unsafe { (*self.ptr).easy_handle == handle.easy.raw() } ++ } ++ ++ /// Returns the token associated with the easy handle that this message ++ /// represents a completion for. ++ /// ++ /// This function will return the token assigned with ++ /// `EasyHandle::set_token`. This reads the `CURLINFO_PRIVATE` field of the ++ /// underlying `*mut CURL`. ++ pub fn token(&self) -> Result { ++ unsafe { ++ let mut p = 0usize; ++ try!(::cvt(curl_sys::curl_easy_getinfo((*self.ptr).easy_handle, ++ curl_sys::CURLINFO_PRIVATE, ++ &mut p))); ++ Ok(p) ++ } ++ } ++} ++ ++impl<'a> fmt::Debug for Message<'a> { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ f.debug_struct("Message") ++ .field("ptr", &self.ptr) ++ .finish() ++ } ++} ++ ++impl Events { ++ /// Creates a new blank event bit mask. ++ pub fn new() -> Events { ++ Events { bits: 0 } ++ } ++ ++ /// Set or unset the whether these events indicate that input is ready. ++ pub fn input(&mut self, val: bool) -> &mut Events { ++ self.flag(curl_sys::CURL_CSELECT_IN, val) ++ } ++ ++ /// Set or unset the whether these events indicate that output is ready. ++ pub fn output(&mut self, val: bool) -> &mut Events { ++ self.flag(curl_sys::CURL_CSELECT_OUT, val) ++ } ++ ++ /// Set or unset the whether these events indicate that an error has ++ /// happened. 
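++    ///
++    /// # Example
++    ///
++    /// A small sketch of building an event mask to hand to `action` (the
++    /// socket itself would come from your event loop):
++    ///
++    /// ```
++    /// use curl::multi::Events;
++    ///
++    /// let mut events = Events::new();
++    /// events.input(true).error(true);
++    /// // Now pass `&events` to `Multi::action` for the relevant socket.
++    /// ```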
++ pub fn error(&mut self, val: bool) -> &mut Events { ++ self.flag(curl_sys::CURL_CSELECT_ERR, val) ++ } ++ ++ fn flag(&mut self, flag: c_int, val: bool) -> &mut Events { ++ if val { ++ self.bits |= flag; ++ } else { ++ self.bits &= !flag; ++ } ++ self ++ } ++} ++ ++impl fmt::Debug for Events { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ f.debug_struct("Events") ++ .field("input", &(self.bits & curl_sys::CURL_CSELECT_IN != 0)) ++ .field("output", &(self.bits & curl_sys::CURL_CSELECT_IN != 0)) ++ .field("error", &(self.bits & curl_sys::CURL_CSELECT_IN != 0)) ++ .finish() ++ } ++} ++ ++impl SocketEvents { ++ /// Wait for incoming data. For the socket to become readable. ++ pub fn input(&self) -> bool { ++ self.bits & curl_sys::CURL_POLL_IN == curl_sys::CURL_POLL_IN ++ } ++ ++ /// Wait for outgoing data. For the socket to become writable. ++ pub fn output(&self) -> bool { ++ self.bits & curl_sys::CURL_POLL_OUT == curl_sys::CURL_POLL_OUT ++ } ++ ++ /// Wait for incoming and outgoing data. For the socket to become readable ++ /// or writable. ++ pub fn input_and_output(&self) -> bool { ++ self.bits & curl_sys::CURL_POLL_INOUT == curl_sys::CURL_POLL_INOUT ++ } ++ ++ /// The specified socket/file descriptor is no longer used by libcurl. ++ pub fn remove(&self) -> bool { ++ self.bits & curl_sys::CURL_POLL_REMOVE == curl_sys::CURL_POLL_REMOVE ++ } ++} ++ ++impl fmt::Debug for SocketEvents { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ f.debug_struct("Events") ++ .field("input", &self.input()) ++ .field("output", &self.output()) ++ .field("remove", &self.remove()) ++ .finish() ++ } ++} ++ ++impl WaitFd { ++ /// Constructs an empty (invalid) WaitFd. ++ pub fn new() -> WaitFd { ++ WaitFd { ++ inner: curl_sys::curl_waitfd { ++ fd: 0, ++ events: 0, ++ revents: 0, ++ } ++ } ++ } ++ ++ /// Set the file descriptor to wait for. ++ pub fn set_fd(&mut self, fd: Socket) { ++ self.inner.fd = fd; ++ } ++ ++ /// Indicate that the socket should poll on read events such as new data ++ /// received. ++ /// ++ /// Corresponds to `CURL_WAIT_POLLIN`. ++ pub fn poll_on_read(&mut self, val: bool) -> &mut WaitFd { ++ self.flag(curl_sys::CURL_WAIT_POLLIN, val) ++ } ++ ++ /// Indicate that the socket should poll on high priority read events such ++ /// as out of band data. ++ /// ++ /// Corresponds to `CURL_WAIT_POLLPRI`. ++ pub fn poll_on_priority_read(&mut self, val: bool) -> &mut WaitFd { ++ self.flag(curl_sys::CURL_WAIT_POLLPRI, val) ++ } ++ ++ /// Indicate that the socket should poll on write events such as the socket ++ /// being clear to write without blocking. ++ /// ++ /// Corresponds to `CURL_WAIT_POLLOUT`. ++ pub fn poll_on_write(&mut self, val: bool) -> &mut WaitFd { ++ self.flag(curl_sys::CURL_WAIT_POLLOUT, val) ++ } ++ ++ fn flag(&mut self, flag: c_short, val: bool) -> &mut WaitFd { ++ if val { ++ self.inner.events |= flag; ++ } else { ++ self.inner.events &= !flag; ++ } ++ self ++ } ++ ++ /// After a call to `wait`, returns `true` if `poll_on_read` was set and a ++ /// read event occured. ++ pub fn received_read(&self) -> bool { ++ self.inner.revents & curl_sys::CURL_WAIT_POLLIN == curl_sys::CURL_WAIT_POLLIN ++ } ++ ++ /// After a call to `wait`, returns `true` if `poll_on_priority_read` was set and a ++ /// priority read event occured. 
++    pub fn received_priority_read(&self) -> bool {
++        self.inner.revents & curl_sys::CURL_WAIT_POLLPRI == curl_sys::CURL_WAIT_POLLPRI
++    }
++
++    /// After a call to `wait`, returns `true` if `poll_on_write` was set and a
++    /// write event occurred.
++    pub fn received_write(&self) -> bool {
++        self.inner.revents & curl_sys::CURL_WAIT_POLLOUT == curl_sys::CURL_WAIT_POLLOUT
++    }
++}
++
++#[cfg(unix)]
++impl From<pollfd> for WaitFd {
++    fn from(pfd: pollfd) -> WaitFd {
++        let mut events = 0;
++        if pfd.events & POLLIN == POLLIN {
++            events |= curl_sys::CURL_WAIT_POLLIN;
++        }
++        if pfd.events & POLLPRI == POLLPRI {
++            events |= curl_sys::CURL_WAIT_POLLPRI;
++        }
++        if pfd.events & POLLOUT == POLLOUT {
++            events |= curl_sys::CURL_WAIT_POLLOUT;
++        }
++        WaitFd {
++            inner: curl_sys::curl_waitfd {
++                fd: pfd.fd,
++                events: events,
++                revents: 0,
++            }
++        }
++    }
++}
++
++impl fmt::Debug for WaitFd {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        // Report each field of the underlying `curl_waitfd`, not just the fd.
++        f.debug_struct("WaitFd")
++            .field("fd", &self.inner.fd)
++            .field("events", &self.inner.events)
++            .field("revents", &self.inner.revents)
++            .finish()
++    }
++}
diff --cc vendor/curl-0.4.14/src/panic.rs
index 000000000,000000000..1da217985
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/panic.rs
@@@ -1,0 -1,0 +1,30 @@@
++use std::any::Any;
++use std::cell::RefCell;
++use std::panic::{self, AssertUnwindSafe};
++
++thread_local!(static LAST_ERROR: RefCell<Option<Box<Any + Send>>> = {
++    RefCell::new(None)
++});
++
++pub fn catch<T, F: FnOnce() -> T>(f: F) -> Option<T> {
++    if LAST_ERROR.with(|slot| slot.borrow().is_some()) {
++        return None
++    }
++
++    // Note that `AssertUnwindSafe` is used here as we prevent reentering
++    // arbitrary code due to the `LAST_ERROR` check above plus propagation of a
++    // panic after we return back to user code from C.
++    match panic::catch_unwind(AssertUnwindSafe(f)) {
++        Ok(ret) => Some(ret),
++        Err(e) => {
++            LAST_ERROR.with(|slot| *slot.borrow_mut() = Some(e));
++            None
++        }
++    }
++}
++
++pub fn propagate() {
++    if let Some(t) = LAST_ERROR.with(|slot| slot.borrow_mut().take()) {
++        panic::resume_unwind(t)
++    }
++}
diff --cc vendor/curl-0.4.14/src/version.rs
index 000000000,000000000..4ca39c44e
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/src/version.rs
@@@ -1,0 -1,0 +1,326 @@@
++use std::ffi::CStr;
++use std::fmt;
++use std::str;
++
++use curl_sys;
++use libc::{c_int, c_char};
++
++/// Version information about libcurl and the capabilities that it supports.
++pub struct Version {
++    inner: *mut curl_sys::curl_version_info_data,
++}
++
++unsafe impl Send for Version {}
++unsafe impl Sync for Version {}
++
++/// An iterator over the list of protocols a version supports.
++#[derive(Clone)]
++pub struct Protocols<'a> {
++    cur: *const *const c_char,
++    _inner: &'a Version,
++}
++
++impl Version {
++    /// Returns the libcurl version that this library is currently linked against.
++    pub fn num() -> &'static str {
++        unsafe {
++            let s = CStr::from_ptr(curl_sys::curl_version() as *const _);
++            str::from_utf8(s.to_bytes()).unwrap()
++        }
++    }
++
++    /// Returns version information about the libcurl build that this library
++    /// is currently linked against. 
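++    ///
++    /// A small example printing what the linked libcurl reports:
++    ///
++    /// ```
++    /// use curl::Version;
++    ///
++    /// let version = Version::get();
++    /// println!("libcurl {} built for {}", version.version(), version.host());
++    /// ```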
++ pub fn get() -> Version { ++ unsafe { ++ let ptr = curl_sys::curl_version_info(curl_sys::CURLVERSION_FOURTH); ++ assert!(!ptr.is_null()); ++ Version { inner: ptr } ++ } ++ } ++ ++ /// Returns the human readable version string, ++ pub fn version(&self) -> &str { ++ unsafe { ++ ::opt_str((*self.inner).version).unwrap() ++ } ++ } ++ ++ /// Returns a numeric representation of the version number ++ /// ++ /// This is a 24 bit number made up of the major number, minor, and then ++ /// patch number. For example 7.9.8 will return 0x070908. ++ pub fn version_num(&self) -> u32 { ++ unsafe { ++ (*self.inner).version_num as u32 ++ } ++ } ++ ++ /// Returns a human readable string of the host libcurl is built for. ++ /// ++ /// This is discovered as part of the build environment. ++ pub fn host(&self) -> &str { ++ unsafe { ++ ::opt_str((*self.inner).host).unwrap() ++ } ++ } ++ ++ /// Returns whether libcurl supports IPv6 ++ pub fn feature_ipv6(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_IPV6) ++ } ++ ++ /// Returns whether libcurl supports SSL ++ pub fn feature_ssl(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_SSL) ++ } ++ ++ /// Returns whether libcurl supports HTTP deflate via libz ++ pub fn feature_libz(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_LIBZ) ++ } ++ ++ /// Returns whether libcurl supports HTTP NTLM ++ pub fn feature_ntlm(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_NTLM) ++ } ++ ++ /// Returns whether libcurl supports HTTP GSSNEGOTIATE ++ pub fn feature_gss_negotiate(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_GSSNEGOTIATE) ++ } ++ ++ /// Returns whether libcurl was built with debug capabilities ++ pub fn feature_debug(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_DEBUG) ++ } ++ ++ /// Returns whether libcurl was built with SPNEGO authentication ++ pub fn feature_spnego(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_SPNEGO) ++ } ++ ++ /// Returns whether libcurl was built with large file support ++ pub fn feature_largefile(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_LARGEFILE) ++ } ++ ++ /// Returns whether libcurl was built with support for IDNA, domain names ++ /// with international letters. ++ pub fn feature_idn(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_IDN) ++ } ++ ++ /// Returns whether libcurl was built with support for SSPI. ++ pub fn feature_sspi(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_SSPI) ++ } ++ ++ /// Returns whether libcurl was built with asynchronous name lookups. ++ pub fn feature_async_dns(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_ASYNCHDNS) ++ } ++ ++ /// Returns whether libcurl was built with support for character ++ /// conversions. ++ pub fn feature_conv(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_CONV) ++ } ++ ++ /// Returns whether libcurl was built with support for TLS-SRP. ++ pub fn feature_tlsauth_srp(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_TLSAUTH_SRP) ++ } ++ ++ /// Returns whether libcurl was built with support for NTLM delegation to ++ /// winbind helper. ++ pub fn feature_ntlm_wb(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_NTLM_WB) ++ } ++ ++ /// Returns whether libcurl was built with support for unix domain socket ++ pub fn feature_unix_domain_socket(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_UNIX_SOCKETS) ++ } ++ ++ /// Returns whether libcurl was built with support for HTTP2. 
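++    ///
++    /// A quick runtime check, as suggested by the crate-level docs:
++    ///
++    /// ```
++    /// use curl::Version;
++    ///
++    /// if Version::get().feature_http2() {
++    ///     println!("this libcurl can speak HTTP/2");
++    /// }
++    /// ```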
++ pub fn feature_http2(&self) -> bool { ++ self.flag(curl_sys::CURL_VERSION_HTTP2) ++ } ++ ++ fn flag(&self, flag: c_int) -> bool { ++ unsafe { ++ (*self.inner).features & flag != 0 ++ } ++ } ++ ++ /// Returns the version of OpenSSL that is used, or None if there is no SSL ++ /// support. ++ pub fn ssl_version(&self) -> Option<&str> { ++ unsafe { ++ ::opt_str((*self.inner).ssl_version) ++ } ++ } ++ ++ /// Returns the version of libz that is used, or None if there is no libz ++ /// support. ++ pub fn libz_version(&self) -> Option<&str> { ++ unsafe { ++ ::opt_str((*self.inner).libz_version) ++ } ++ } ++ ++ /// Returns an iterator over the list of protocols that this build of ++ /// libcurl supports. ++ pub fn protocols(&self) -> Protocols { ++ unsafe { ++ Protocols { _inner: self, cur: (*self.inner).protocols } ++ } ++ } ++ ++ /// If available, the human readable version of ares that libcurl is linked ++ /// against. ++ pub fn ares_version(&self) -> Option<&str> { ++ unsafe { ++ if (*self.inner).age >= curl_sys::CURLVERSION_SECOND { ++ ::opt_str((*self.inner).ares) ++ } else { ++ None ++ } ++ } ++ } ++ ++ /// If available, the version of ares that libcurl is linked against. ++ pub fn ares_version_num(&self) -> Option { ++ unsafe { ++ if (*self.inner).age >= curl_sys::CURLVERSION_SECOND { ++ Some((*self.inner).ares_num as u32) ++ } else { ++ None ++ } ++ } ++ } ++ ++ /// If available, the version of libidn that libcurl is linked against. ++ pub fn libidn_version(&self) -> Option<&str> { ++ unsafe { ++ if (*self.inner).age >= curl_sys::CURLVERSION_THIRD { ++ ::opt_str((*self.inner).libidn) ++ } else { ++ None ++ } ++ } ++ } ++ ++ /// If available, the version of iconv libcurl is linked against. ++ pub fn iconv_version_num(&self) -> Option { ++ unsafe { ++ if (*self.inner).age >= curl_sys::CURLVERSION_FOURTH { ++ Some((*self.inner).iconv_ver_num as u32) ++ } else { ++ None ++ } ++ } ++ } ++ ++ /// If available, the version of iconv libcurl is linked against. ++ pub fn libssh_version(&self) -> Option<&str> { ++ unsafe { ++ if (*self.inner).age >= curl_sys::CURLVERSION_FOURTH { ++ ::opt_str((*self.inner).libssh_version) ++ } else { ++ None ++ } ++ } ++ } ++ ++ /// If available, the version of brotli libcurl is linked against. ++ pub fn brotli_version_num(&self) -> Option { ++ unsafe { ++ if (*self.inner).age >= curl_sys::CURLVERSION_FIFTH { ++ Some((*self.inner).brotli_ver_num) ++ } else { ++ None ++ } ++ } ++ } ++ ++ /// If available, the version of brotli libcurl is linked against. 
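++    ///
++    /// For example:
++    ///
++    /// ```
++    /// use curl::Version;
++    ///
++    /// match Version::get().brotli_version() {
++    ///     Some(v) => println!("brotli {}", v),
++    ///     None => println!("built without brotli"),
++    /// }
++    /// ```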
++    pub fn brotli_version(&self) -> Option<&str> {
++        unsafe {
++            if (*self.inner).age >= curl_sys::CURLVERSION_FIFTH {
++                ::opt_str((*self.inner).brotli_version)
++            } else {
++                None
++            }
++        }
++    }
++}
++
++impl fmt::Debug for Version {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        let mut f = f.debug_struct("Version");
++        f.field("version", &self.version())
++            .field("host", &self.host())
++            .field("feature_ipv6", &self.feature_ipv6())
++            .field("feature_ssl", &self.feature_ssl())
++            .field("feature_libz", &self.feature_libz())
++            .field("feature_ntlm", &self.feature_ntlm())
++            .field("feature_gss_negotiate", &self.feature_gss_negotiate())
++            .field("feature_debug", &self.feature_debug())
++            .field("feature_spnego", &self.feature_spnego())
++            .field("feature_largefile", &self.feature_largefile())
++            .field("feature_idn", &self.feature_idn())
++            .field("feature_sspi", &self.feature_sspi())
++            .field("feature_async_dns", &self.feature_async_dns())
++            .field("feature_conv", &self.feature_conv())
++            .field("feature_tlsauth_srp", &self.feature_tlsauth_srp())
++            .field("feature_ntlm_wb", &self.feature_ntlm_wb())
++            .field("feature_unix_domain_socket", &self.feature_unix_domain_socket());
++
++        if let Some(s) = self.ssl_version() {
++            f.field("ssl_version", &s);
++        }
++        if let Some(s) = self.libz_version() {
++            f.field("libz_version", &s);
++        }
++        if let Some(s) = self.ares_version() {
++            f.field("ares_version", &s);
++        }
++        if let Some(s) = self.libidn_version() {
++            f.field("libidn_version", &s);
++        }
++        if let Some(s) = self.iconv_version_num() {
++            f.field("iconv_version_num", &format!("{:x}", s));
++        }
++        if let Some(s) = self.libssh_version() {
++            f.field("libssh_version", &s);
++        }
++
++        f.field("protocols", &self.protocols().collect::<Vec<_>>());
++
++        f.finish()
++    }
++}
++
++impl<'a> Iterator for Protocols<'a> {
++    type Item = &'a str;
++
++    fn next(&mut self) -> Option<&'a str> {
++        unsafe {
++            if (*self.cur).is_null() {
++                return None
++            }
++            let ret = ::opt_str(*self.cur).unwrap();
++            self.cur = self.cur.offset(1);
++            Some(ret)
++        }
++    }
++}
++
++impl<'a> fmt::Debug for Protocols<'a> {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_list()
++            .entries(self.clone())
++            .finish()
++    }
++}
diff --cc vendor/curl-0.4.14/tests/easy.rs
index 000000000,000000000..09add3149
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/tests/easy.rs
@@@ -1,0 -1,0 +1,693 @@@
++extern crate curl;
++
++use std::cell::{RefCell, Cell};
++use std::io::Read;
++use std::rc::Rc;
++use std::str;
++use std::time::Duration;
++
++macro_rules! 
t { ++ ($e:expr) => (match $e { ++ Ok(e) => e, ++ Err(e) => panic!("{} failed with {:?}", stringify!($e), e), ++ }) ++} ++ ++use curl::easy::{Easy, List, WriteError, ReadError, Transfer}; ++ ++use server::Server; ++mod server; ++ ++fn handle() -> Easy { ++ let mut e = Easy::new(); ++ t!(e.timeout(Duration::new(20, 0))); ++ return e ++} ++ ++fn sink(data: &[u8]) -> Result { ++ Ok(data.len()) ++} ++ ++#[test] ++fn get_smoke() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("HTTP/1.1 200 OK\r\n\r\n"); ++ ++ let mut handle = handle(); ++ t!(handle.url(&s.url("/"))); ++ t!(handle.perform()); ++} ++ ++#[test] ++fn get_path() { ++ let s = Server::new(); ++ s.receive("\ ++GET /foo HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("HTTP/1.1 200 OK\r\n\r\n"); ++ ++ let mut handle = handle(); ++ t!(handle.url(&s.url("/foo"))); ++ t!(handle.perform()); ++} ++ ++#[test] ++fn write_callback() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("HTTP/1.1 200 OK\r\n\r\nhello!"); ++ ++ let mut all = Vec::::new(); ++ { ++ let mut handle = handle(); ++ t!(handle.url(&s.url("/"))); ++ let mut handle = handle.transfer(); ++ t!(handle.write_function(|data| { ++ all.extend(data); ++ Ok(data.len()) ++ })); ++ t!(handle.perform()); ++ } ++ assert_eq!(all, b"hello!"); ++} ++ ++#[test] ++fn resolve() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: example.com:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("HTTP/1.1 200 OK\r\n\r\n"); ++ ++ let mut list = List::new(); ++ t!(list.append(&format!("example.com:{}:127.0.0.1", s.addr().port()))); ++ let mut handle = handle(); ++ t!(handle.url(&format!("http://example.com:{}/", s.addr().port()))); ++ t!(handle.resolve(list)); ++ t!(handle.perform()); ++} ++ ++#[test] ++fn progress() { ++ let s = Server::new(); ++ s.receive("\ ++GET /foo HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("HTTP/1.1 200 OK\r\n\r\nHello!"); ++ ++ let mut hits = 0; ++ let mut dl = 0.0; ++ { ++ let mut handle = handle(); ++ t!(handle.url(&s.url("/foo"))); ++ t!(handle.progress(true)); ++ t!(handle.write_function(sink)); ++ ++ let mut handle = handle.transfer(); ++ t!(handle.progress_function(|_, a, _, _| { ++ hits += 1; ++ dl = a; ++ true ++ })); ++ t!(handle.perform()); ++ } ++ assert!(hits > 0); ++ assert_eq!(dl, 6.0); ++} ++ ++#[test] ++fn headers() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++Foo: bar\r\n\ ++Bar: baz\r\n\ ++\r\n ++Hello!"); ++ ++ let mut headers = Vec::new(); ++ { ++ let mut handle = handle(); ++ t!(handle.url(&s.url("/"))); ++ ++ let mut handle = handle.transfer(); ++ t!(handle.header_function(|h| { ++ headers.push(str::from_utf8(h).unwrap().to_string()); ++ true ++ })); ++ t!(handle.write_function(sink)); ++ t!(handle.perform()); ++ } ++ assert_eq!(headers, vec![ ++ "HTTP/1.1 200 OK\r\n".to_string(), ++ "Foo: bar\r\n".to_string(), ++ "Bar: baz\r\n".to_string(), ++ "\r\n".to_string(), ++ ]); ++} ++ ++#[test] ++fn fail_on_error() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 401 Not so good\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.fail_on_error(true)); ++ 
assert!(h.perform().is_err()); ++ ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 401 Not so good\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.fail_on_error(false)); ++ t!(h.perform()); ++} ++ ++#[test] ++fn port() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: localhost:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url("http://localhost/")); ++ t!(h.port(s.addr().port())); ++ t!(h.perform()); ++} ++ ++#[test] ++fn proxy() { ++ let s = Server::new(); ++ s.receive("\ ++GET http://example.com/ HTTP/1.1\r\n\ ++Host: example.com\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url("http://example.com/")); ++ t!(h.proxy(&s.url("/"))); ++ t!(h.perform()); ++} ++ ++#[test] ++#[ignore] // fails on newer curl versions? seems benign ++fn noproxy() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.proxy(&s.url("/"))); ++ t!(h.noproxy("127.0.0.1")); ++ t!(h.perform()); ++} ++ ++#[test] ++fn misc() { ++ let mut h = handle(); ++ t!(h.tcp_nodelay(true)); ++ // t!(h.tcp_keepalive(true)); ++ // t!(h.tcp_keepidle(Duration::new(3, 0))); ++ // t!(h.tcp_keepintvl(Duration::new(3, 0))); ++ t!(h.buffer_size(10)); ++ t!(h.dns_cache_timeout(Duration::new(1, 0))); ++} ++ ++#[test] ++fn userpass() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Authorization: Basic YmFyOg==\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.username("foo")); ++ t!(h.username("bar")); ++ t!(h.perform()); ++} ++ ++#[test] ++fn accept_encoding() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++Accept-Encoding: gzip\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.accept_encoding("gzip")); ++ t!(h.perform()); ++} ++ ++#[test] ++fn follow_location() { ++ let s1 = Server::new(); ++ let s2 = Server::new(); ++ s1.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s1.send(&format!("\ ++HTTP/1.1 301 Moved Permanently\r\n\ ++Location: http://{}/foo\r\n\ ++\r\n", s2.addr())); ++ ++ s2.receive("\ ++GET /foo HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s2.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s1.url("/"))); ++ t!(h.follow_location(true)); ++ t!(h.perform()); ++} ++ ++#[test] ++fn put() { ++ let s = Server::new(); ++ s.receive("\ ++PUT / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++Content-Length: 5\r\n\ ++\r\n\ ++data\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut data = "data\n".as_bytes(); ++ let mut list = List::new(); ++ t!(list.append("Expect:")); ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.put(true)); ++ t!(h.in_filesize(5)); ++ t!(h.upload(true)); ++ t!(h.http_headers(list)); ++ let mut h = h.transfer(); ++ t!(h.read_function(|buf| { ++ Ok(data.read(buf).unwrap()) ++ })); ++ t!(h.perform()); ++} ++ ++#[test] ++fn post1() 
{ ++ let s = Server::new(); ++ s.receive("\ ++POST / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++Content-Length: 5\r\n\ ++Content-Type: application/x-www-form-urlencoded\r\n\ ++\r\n\ ++data\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.post(true)); ++ t!(h.post_fields_copy(b"data\n")); ++ t!(h.perform()); ++} ++ ++#[test] ++fn post2() { ++ let s = Server::new(); ++ s.receive("\ ++POST / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++Content-Length: 5\r\n\ ++Content-Type: application/x-www-form-urlencoded\r\n\ ++\r\n\ ++data\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.post(true)); ++ t!(h.post_fields_copy(b"data\n")); ++ t!(h.write_function(sink)); ++ t!(h.perform()); ++} ++ ++#[test] ++fn post3() { ++ let s = Server::new(); ++ s.receive("\ ++POST / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++Content-Length: 5\r\n\ ++Content-Type: application/x-www-form-urlencoded\r\n\ ++\r\n\ ++data\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut data = "data\n".as_bytes(); ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.post(true)); ++ t!(h.post_field_size(5)); ++ let mut h = h.transfer(); ++ t!(h.read_function(|buf| { ++ Ok(data.read(buf).unwrap()) ++ })); ++ t!(h.perform()); ++} ++ ++#[test] ++fn referer() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++Referer: foo\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.referer("foo")); ++ t!(h.perform()); ++} ++ ++#[test] ++fn useragent() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++User-Agent: foo\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.useragent("foo")); ++ t!(h.perform()); ++} ++ ++#[test] ++fn custom_headers() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Foo: bar\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut custom = List::new(); ++ t!(custom.append("Foo: bar")); ++ t!(custom.append("Accept:")); ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.http_headers(custom)); ++ t!(h.perform()); ++} ++ ++#[test] ++fn cookie() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++Cookie: foo\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = handle(); ++ t!(h.url(&s.url("/"))); ++ t!(h.cookie("foo")); ++ t!(h.perform()); ++} ++ ++#[test] ++fn url_encoding() { ++ let mut h = handle(); ++ assert_eq!(h.url_encode(b"foo"), "foo"); ++ assert_eq!(h.url_encode(b"foo bar"), "foo%20bar"); ++ assert_eq!(h.url_encode(b"foo bar\xff"), "foo%20bar%FF"); ++ assert_eq!(h.url_encode(b""), ""); ++ assert_eq!(h.url_decode("foo"), b"foo"); ++ assert_eq!(h.url_decode("foo%20bar"), b"foo bar"); ++ assert_eq!(h.url_decode("foo%2"), b"foo%2"); ++ assert_eq!(h.url_decode("foo%xx"), b"foo%xx"); ++ assert_eq!(h.url_decode("foo%ff"), b"foo\xff"); ++ assert_eq!(h.url_decode(""), b""); ++} ++ ++#[test] ++fn getters() { ++ let s = Server::new(); ++ s.receive("\ ++GET / HTTP/1.1\r\n\ ++Host: 127.0.0.1:$PORT\r\n\ ++Accept: */*\r\n\ ++\r\n"); ++ s.send("\ ++HTTP/1.1 200 OK\r\n\ ++\r\n"); ++ ++ let mut h = 
handle();
++    t!(h.url(&s.url("/")));
++    t!(h.cookie_file("/dev/null"));
++    t!(h.perform());
++    assert_eq!(t!(h.response_code()), 200);
++    assert_eq!(t!(h.redirect_count()), 0);
++    assert_eq!(t!(h.redirect_url()), None);
++    assert_eq!(t!(h.content_type()), None);
++
++    let addr = format!("http://{}/", s.addr());
++    assert_eq!(t!(h.effective_url()), Some(&addr[..]));
++
++    // TODO: test this
++    // let cookies = t!(h.cookies()).iter()
++    //     .map(|s| s.to_vec())
++    //     .collect::<Vec<_>>();
++    // assert_eq!(cookies.len(), 1);
++}
++
++#[test]
++#[should_panic]
++fn panic_in_callback() {
++    let s = Server::new();
++    s.receive("\
++GET / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++\r\n");
++    s.send("\
++HTTP/1.1 200 OK\r\n\
++\r\n");
++
++    let mut h = handle();
++    t!(h.url(&s.url("/")));
++    t!(h.header_function(|_| panic!()));
++    t!(h.perform());
++}
++
++#[test]
++fn abort_read() {
++    let s = Server::new();
++    s.receive("\
++PUT / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++Content-Length: 2\r\n\
++\r\n");
++    s.send("\
++HTTP/1.1 200 OK\r\n\
++\r\n");
++
++    let mut h = handle();
++    t!(h.url(&s.url("/")));
++    t!(h.read_function(|_| Err(ReadError::Abort)));
++    t!(h.put(true));
++    t!(h.in_filesize(2));
++    let mut list = List::new();
++    t!(list.append("Expect:"));
++    t!(h.http_headers(list));
++    let err = h.perform().unwrap_err();
++    assert!(err.is_aborted_by_callback());
++}
++
++#[test]
++fn pause_write_then_resume() {
++    let s = Server::new();
++    s.receive("\
++GET / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++\r\n");
++    s.send("\
++HTTP/1.1 200 OK\r\n\
++\r\n
++a\n
++b");
++
++    let mut h = handle();
++    t!(h.url(&s.url("/")));
++    t!(h.progress(true));
++
++    struct State<'a, 'b> {
++        paused: Cell<bool>,
++        unpaused: Cell<bool>,
++        transfer: RefCell<Transfer<'a, 'b>>,
++    }
++
++    let h = Rc::new(State {
++        paused: Cell::new(false),
++        unpaused: Cell::new(false),
++        transfer: RefCell::new(h.transfer()),
++    });
++
++    let h2 = h.clone();
++    t!(h.transfer.borrow_mut().write_function(move |data| {
++        if h2.unpaused.get() {
++            h2.unpaused.set(false);
++            Ok(data.len())
++        } else {
++            h2.paused.set(true);
++            Err(WriteError::Pause)
++        }
++    }));
++    let h2 = h.clone();
++    t!(h.transfer.borrow_mut().progress_function(move |_, _, _, _| {
++        if h2.paused.get() {
++            h2.paused.set(false);
++            h2.unpaused.set(true);
++            t!(h2.transfer.borrow().unpause_write());
++        }
++        true
++    }));
++    t!(h.transfer.borrow().perform());
++}
++
++#[test]
++fn perform_in_perform_is_bad() {
++    let s = Server::new();
++    s.receive("\
++GET / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++\r\n");
++    s.send("\
++HTTP/1.1 200 OK\r\n\
++\r\n
++a\n
++b");
++
++    let mut h = handle();
++    t!(h.url(&s.url("/")));
++    t!(h.progress(true));
++
++    let h = Rc::new(RefCell::new(h.transfer()));
++
++    let h2 = h.clone();
++    t!(h.borrow_mut().write_function(move |data| {
++        assert!(h2.borrow().perform().is_err());
++        Ok(data.len())
++    }));
++    t!(h.borrow().perform());
++}
++
++// Stupid test to check if unix_socket is callable
++#[test]
++fn check_unix_socket() {
++    let mut h = handle();
++    h.unix_socket("/var/something.socks").is_ok();
++}
++
diff --cc vendor/curl-0.4.14/tests/formdata
index 000000000,000000000..ce0136250
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/tests/formdata
@@@ -1,0 -1,0 +1,1 @@@
++hello
diff --cc vendor/curl-0.4.14/tests/multi.rs
index 000000000,000000000..c6eeea008
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/tests/multi.rs
@@@ -1,0 -1,0 +1,253 @@@
++#![cfg(unix)]
++
++extern crate curl;
++extern crate mio;
++extern crate mio_extras;
++
++use std::collections::HashMap;
++use std::io::{Read, Cursor};
++use std::time::Duration;
++
++use curl::easy::{Easy, List};
++use curl::multi::Multi;
++
++macro_rules! t {
++    ($e:expr) => (match $e {
++        Ok(e) => e,
++        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
++    })
++}
++
++use server::Server;
++mod server;
++
++#[test]
++fn smoke() {
++    let m = Multi::new();
++    let mut e = Easy::new();
++
++    let s = Server::new();
++    s.receive("\
++GET / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++\r\n");
++    s.send("HTTP/1.1 200 OK\r\n\r\n");
++
++    t!(e.url(&s.url("/")));
++    let _e = t!(m.add(e));
++    while t!(m.perform()) > 0 {
++        t!(m.wait(&mut [], Duration::from_secs(1)));
++    }
++}
++
++#[test]
++fn smoke2() {
++    let m = Multi::new();
++
++    let s1 = Server::new();
++    s1.receive("\
++GET / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++\r\n");
++    s1.send("HTTP/1.1 200 OK\r\n\r\n");
++
++    let s2 = Server::new();
++    s2.receive("\
++GET / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++\r\n");
++    s2.send("HTTP/1.1 200 OK\r\n\r\n");
++
++    let mut e1 = Easy::new();
++    t!(e1.url(&s1.url("/")));
++    let _e1 = t!(m.add(e1));
++    let mut e2 = Easy::new();
++    t!(e2.url(&s2.url("/")));
++    let _e2 = t!(m.add(e2));
++
++    while t!(m.perform()) > 0 {
++        t!(m.wait(&mut [], Duration::from_secs(1)));
++    }
++
++    let mut done = 0;
++    m.messages(|msg| {
++        msg.result().unwrap().unwrap();
++        done += 1;
++    });
++    assert_eq!(done, 2);
++}
++
++#[test]
++fn upload_lots() {
++    use curl::multi::{Socket, SocketEvents, Events};
++
++    #[derive(Debug)]
++    enum Message {
++        Timeout(Option<Duration>),
++        Wait(Socket, SocketEvents, usize),
++    }
++
++    let mut m = Multi::new();
++    let poll = t!(mio::Poll::new());
++    let (tx, rx) = mio_extras::channel::channel();
++    let tx2 = tx.clone();
++    t!(m.socket_function(move |socket, events, token| {
++        t!(tx2.send(Message::Wait(socket, events, token)));
++    }));
++    t!(m.timer_function(move |dur| {
++        t!(tx.send(Message::Timeout(dur)));
++        true
++    }));
++
++    let s = Server::new();
++    s.receive(&format!("\
++PUT / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++Content-Length: 131072\r\n\
++\r\n\
++{}\n", vec!["a"; 128 * 1024 - 1].join("")));
++    s.send("\
++HTTP/1.1 200 OK\r\n\
++\r\n");
++
++    let mut data = vec![b'a'; 128 * 1024 - 1];
++    data.push(b'\n');
++    let mut data = Cursor::new(data);
++    let mut list = List::new();
++    t!(list.append("Expect:"));
++    let mut h = Easy::new();
++    t!(h.url(&s.url("/")));
++    t!(h.put(true));
++    t!(h.read_function(move |buf| {
++        Ok(data.read(buf).unwrap())
++    }));
++    t!(h.in_filesize(128 * 1024));
++    t!(h.upload(true));
++    t!(h.http_headers(list));
++
++    t!(poll.register(&rx,
++                     mio::Token(0),
++                     mio::Ready::all(),
++                     mio::PollOpt::level()));
++
++    let e = t!(m.add(h));
++
++    assert!(t!(m.perform()) > 0);
++    let mut next_token = 1;
++    let mut token_map = HashMap::new();
++    let mut cur_timeout = None;
++    let mut events = mio::Events::with_capacity(128);
++    let mut running = true;
++
++    while running {
++        let n = t!(poll.poll(&mut events, cur_timeout));
++
++        if n == 0 {
++            if t!(m.timeout()) == 0 {
++                running = false;
++            }
++        }
++
++        for event in events.iter() {
++            while event.token() == mio::Token(0) {
++                match rx.try_recv() {
++                    Ok(Message::Timeout(dur)) => cur_timeout = dur,
++                    Ok(Message::Wait(socket, events, token)) => {
++                        let evented = mio::unix::EventedFd(&socket);
++                        if events.remove() {
++                            token_map.remove(&token).unwrap();
++                        } else {
++                            let mut e = mio::Ready::none();
++                            if events.input() {
++                                e = e | mio::Ready::readable();
++                            }
++                            if events.output() {
++                                e = e | mio::Ready::writable();
++                            }
++                            if token == 0 {
++                                let token = next_token;
++                                next_token += 1;
++                                t!(m.assign(socket, token));
++                                token_map.insert(token, socket);
++                                t!(poll.register(&evented,
++                                                 mio::Token(token),
++                                                 e,
++                                                 mio::PollOpt::level()));
++                            } else {
++                                t!(poll.reregister(&evented,
++                                                   mio::Token(token),
++                                                   e,
++                                                   mio::PollOpt::level()));
++                            }
++                        }
++                    }
++                    Err(_) => break,
++                }
++            }
++
++            if event.token() == mio::Token(0) {
++                continue
++            }
++
++            let token = event.token();
++            let socket = token_map[&token.into()];
++            let mut e = Events::new();
++            if event.kind().is_readable() {
++                e.input(true);
++            }
++            if event.kind().is_writable() {
++                e.output(true);
++            }
++            if event.kind().is_error() {
++                e.error(true);
++            }
++            let remaining = t!(m.action(socket, &e));
++            if remaining == 0 {
++                running = false;
++            }
++        }
++    }
++
++    let mut done = 0;
++    m.messages(|m| {
++        m.result().unwrap().unwrap();
++        done += 1;
++    });
++    assert_eq!(done, 1);
++
++    let mut e = t!(m.remove(e));
++    assert_eq!(t!(e.response_code()), 200);
++}
++
++// Tests passing raw file descriptors to Multi::wait. The test is limited to Linux only as the
++// semantics of the underlying poll(2) system call used by curl apparently differ on other
++// platforms, making the test fail.
++#[cfg(target_os = "linux")]
++#[test]
++fn waitfds() {
++    use std::fs::File;
++    use std::os::unix::io::AsRawFd;
++    use curl::multi::WaitFd;
++
++    let filenames = ["/dev/null", "/dev/zero", "/dev/urandom"];
++    let files: Vec<File> = filenames.iter()
++        .map(|filename| File::open(filename).unwrap())
++        .collect();
++    let mut waitfds: Vec<WaitFd> = files.iter().map(|f| {
++        let mut waitfd = WaitFd::new();
++        waitfd.set_fd(f.as_raw_fd());
++        waitfd.poll_on_read(true);
++        waitfd
++    }).collect();
++
++    let m = Multi::new();
++    let events = t!(m.wait(&mut waitfds, Duration::from_secs(1)));
++    assert_eq!(events, 3);
++    for waitfd in waitfds {
++        assert!(waitfd.received_read());
++    }
++}
diff --cc vendor/curl-0.4.14/tests/post.rs
index 000000000,000000000..e13a5ec3f
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/tests/post.rs
@@@ -1,0 -1,0 +1,108 @@@
++extern crate curl;
++
++use std::time::Duration;
++
++macro_rules! t {
++    ($e:expr) => (match $e {
++        Ok(e) => e,
++        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
++    })
++}
++
++use curl::easy::{Easy, List, Form};
++
++use server::Server;
++mod server;
++
++fn handle() -> Easy {
++    let mut e = Easy::new();
++    t!(e.timeout(Duration::new(20, 0)));
++    let mut list = List::new();
++    t!(list.append("Expect:"));
++    t!(e.http_headers(list));
++    return e
++}
++
++#[test]
++fn custom() {
++    let s = Server::new();
++    s.receive("\
++POST / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++Content-Length: 142\r\n\
++Content-Type: multipart/form-data; boundary=--[..]\r\n\
++\r\n\
++--[..]\r\n\
++Content-Disposition: form-data; name=\"foo\"\r\n\
++\r\n\
++1234\r\n\
++--[..]\r\n");
++    s.send("HTTP/1.1 200 OK\r\n\r\n");
++
++    let mut handle = handle();
++    let mut form = Form::new();
++    t!(form.part("foo").contents(b"1234").add());
++    t!(handle.url(&s.url("/")));
++    t!(handle.httppost(form));
++    t!(handle.perform());
++}
++
++#[test]
++fn buffer() {
++    let s = Server::new();
++    s.receive("\
++POST / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++Content-Length: 181\r\n\
++Content-Type: multipart/form-data; boundary=--[..]\r\n\
++\r\n\
++--[..]\r\n\
++Content-Disposition: form-data; name=\"foo\"; filename=\"bar\"\r\n\
++Content-Type: foo/bar\r\n\
++\r\n\
++1234\r\n\
++--[..]\r\n");
++    s.send("HTTP/1.1 200 OK\r\n\r\n");
++
++    let mut handle = handle();
++    let mut form = Form::new();
++    t!(form.part("foo")
++           .buffer("bar", b"1234".to_vec())
++           .content_type("foo/bar")
++           .add());
++    t!(handle.url(&s.url("/")));
++    t!(handle.httppost(form));
++    t!(handle.perform());
++}
++
++#[test]
++fn file() {
++    let s = Server::new();
++    let formdata = include_str!("formdata");
++    s.receive(format!("\
++POST / HTTP/1.1\r\n\
++Host: 127.0.0.1:$PORT\r\n\
++Accept: */*\r\n\
++Content-Length: {}\r\n\
++Content-Type: multipart/form-data; boundary=--[..]\r\n\
++\r\n\
++--[..]\r\n\
++Content-Disposition: form-data; name=\"foo\"; filename=\"formdata\"\r\n\
++Content-Type: application/octet-stream\r\n\
++\r\n\
++{}\
++\r\n\
++--[..]\r\n", 199 + formdata.len(), formdata).as_str());
++    s.send("HTTP/1.1 200 OK\r\n\r\n");
++
++    let mut handle = handle();
++    let mut form = Form::new();
++    t!(form.part("foo")
++           .file("tests/formdata")
++           .add());
++    t!(handle.url(&s.url("/")));
++    t!(handle.httppost(form));
++    t!(handle.perform());
++}
diff --cc vendor/curl-0.4.14/tests/server/mod.rs
index 000000000,000000000..445cf901e
new file mode 100644
--- /dev/null
+++ b/vendor/curl-0.4.14/tests/server/mod.rs
@@@ -1,0 -1,0 +1,175 @@@
++#![allow(dead_code)]
++
++use std::collections::HashSet;
++use std::net::{TcpListener, SocketAddr, TcpStream};
++use std::io::prelude::*;
++use std::thread;
++use std::sync::mpsc::{Sender, Receiver, channel};
++use std::io::BufReader;
++
++pub struct Server {
++    messages: Option<Sender<Message>>,
++    addr: SocketAddr,
++    thread: Option<thread::JoinHandle<()>>,
++}
++
++enum Message {
++    Read(String),
++    Write(String),
++}
++
++fn run(listener: &TcpListener, rx: &Receiver<Message>) {
++    let mut socket = BufReader::new(listener.accept().unwrap().0);
++    for msg in rx.iter() {
++        match msg {
++            Message::Read(ref expected) => {
++                let mut expected = &expected[..];
++                let mut expected_headers = HashSet::new();
++                while let Some(i) = expected.find("\n") {
++                    let line = &expected[..i + 1];
++                    expected = &expected[i + 1..];
++                    expected_headers.insert(line);
++                    if line == "\r\n" {
++                        break
++                    }
++                }
++
++                let mut expected_len = None;
++                while expected_headers.len() > 0 {
++                    let mut actual = 
String::new(); ++ t!(socket.read_line(&mut actual)); ++ if actual.starts_with("Content-Length") { ++ let len = actual.split(": ").skip(1).next().unwrap(); ++ expected_len = len.trim().parse().ok(); ++ } ++ // various versions of libcurl do different things here ++ if actual == "Proxy-Connection: Keep-Alive\r\n" { ++ continue ++ } ++ if expected_headers.remove(&actual[..]) { ++ continue ++ } ++ ++ let mut found = None; ++ for header in expected_headers.iter() { ++ if lines_match(header, &actual) { ++ found = Some(header.clone()); ++ break ++ } ++ } ++ if let Some(found) = found { ++ expected_headers.remove(&found); ++ continue ++ } ++ panic!("unexpected header: {:?} (remaining headers {:?})", ++ actual, expected_headers); ++ } ++ for header in expected_headers { ++ panic!("expected header but not found: {:?}", header); ++ } ++ ++ let mut line = String::new(); ++ let mut socket = match expected_len { ++ Some(amt) => socket.by_ref().take(amt), ++ None => socket.by_ref().take(expected.len() as u64), ++ }; ++ while socket.limit() > 0 { ++ line.truncate(0); ++ t!(socket.read_line(&mut line)); ++ if line.len() == 0 { ++ break ++ } ++ if expected.len() == 0 { ++ panic!("unexpected line: {:?}", line); ++ } ++ let i = expected.find("\n").unwrap_or(expected.len() - 1); ++ let expected_line = &expected[..i + 1]; ++ expected = &expected[i + 1..]; ++ if lines_match(expected_line, &line) { ++ continue ++ } ++ panic!("lines didn't match:\n\ ++ expected: {:?}\n\ ++ actual: {:?}\n", expected_line, line) ++ } ++ if expected.len() != 0 { ++ println!("didn't get expected data: {:?}", expected); ++ } ++ } ++ Message::Write(ref to_write) => { ++ t!(socket.get_mut().write_all(to_write.as_bytes())); ++ return ++ } ++ } ++ } ++ ++ let mut dst = Vec::new(); ++ t!(socket.read_to_end(&mut dst)); ++ assert!(dst.len() == 0); ++} ++ ++fn lines_match(expected: &str, mut actual: &str) -> bool { ++ for (i, part) in expected.split("[..]").enumerate() { ++ match actual.find(part) { ++ Some(j) => { ++ if i == 0 && j != 0 { ++ return false ++ } ++ actual = &actual[j + part.len()..]; ++ } ++ None => { ++ return false ++ } ++ } ++ } ++ actual.is_empty() || expected.ends_with("[..]") ++} ++ ++impl Server { ++ pub fn new() -> Server { ++ let listener = t!(TcpListener::bind("127.0.0.1:0")); ++ let addr = t!(listener.local_addr()); ++ let (tx, rx) = channel(); ++ let thread = thread::spawn(move || run(&listener, &rx)); ++ Server { ++ messages: Some(tx), ++ addr: addr, ++ thread: Some(thread), ++ } ++ } ++ ++ pub fn receive(&self, msg: &str) { ++ let msg = msg.replace("$PORT", &self.addr.port().to_string()); ++ self.msg(Message::Read(msg)); ++ } ++ ++ pub fn send(&self, msg: &str) { ++ let msg = msg.replace("$PORT", &self.addr.port().to_string()); ++ self.msg(Message::Write(msg)); ++ } ++ ++ fn msg(&self, msg: Message) { ++ t!(self.messages.as_ref().unwrap().send(msg)); ++ } ++ ++ pub fn addr(&self) -> &SocketAddr { ++ &self.addr ++ } ++ ++ pub fn url(&self, path: &str) -> String { ++ format!("http://{}{}", self.addr, path) ++ } ++} ++ ++impl Drop for Server { ++ fn drop(&mut self) { ++ drop(TcpStream::connect(&self.addr)); ++ drop(self.messages.take()); ++ let res = self.thread.take().unwrap().join(); ++ if !thread::panicking() { ++ t!(res); ++ } else if let Err(e) = res { ++ println!("child server thread also failed: {:?}", e); ++ } ++ } ++} diff --cc vendor/curl-sys-0.4.8/.cargo-checksum.json index 000000000,000000000..1c42e12a3 new file mode 100644 --- /dev/null +++ b/vendor/curl-sys-0.4.8/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 
@@@ ++{"files":{},"package":"981bd902fcd8b8b999cf71b81447e27d66c3493a7f62f1372866fd32986c0c82"} diff --cc vendor/curl-sys-0.4.8/Cargo.toml index 000000000,000000000..9112de9e7 new file mode 100644 --- /dev/null +++ b/vendor/curl-sys-0.4.8/Cargo.toml @@@ -1,0 -1,0 +1,49 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "curl-sys" ++version = "0.4.8" ++authors = ["Carl Lerche ", "Alex Crichton "] ++build = "build.rs" ++links = "curl" ++description = "Native bindings to the libcurl library" ++documentation = "https://docs.rs/curl-sys" ++categories = ["external-ffi-bindings"] ++license = "MIT" ++repository = "https://github.com/alexcrichton/curl-rust" ++ ++[lib] ++name = "curl_sys" ++path = "lib.rs" ++[dependencies.libc] ++version = "0.2.2" ++ ++[dependencies.libz-sys] ++version = "1.0.18" ++[build-dependencies.cc] ++version = "1.0" ++ ++[build-dependencies.pkg-config] ++version = "0.3.3" ++[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-sys] ++version = "0.9" ++[target."cfg(target_env = \"msvc\")".build-dependencies.vcpkg] ++version = "0.2" ++[target."cfg(windows)".dependencies.winapi] ++version = "0.3" ++features = ["winsock2", "ws2def"] ++[badges.appveyor] ++repository = "alexcrichton/curl-rust" ++ ++[badges.travis-ci] ++repository = "alexcrichton/curl-rust" diff --cc vendor/curl-sys-0.4.8/LICENSE index 000000000,000000000..5f5e4b09d new file mode 100644 --- /dev/null +++ b/vendor/curl-sys-0.4.8/LICENSE @@@ -1,0 -1,0 +1,19 @@@ ++Copyright (c) 2014 Carl Lerche ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. 
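A note on the `links = "curl"` key in the curl-sys manifest above: because of
that key, every `cargo:key=value` line the build script below prints (for
example `cargo:include=...`) is re-exported by Cargo to the build scripts of
crates that depend on curl-sys, as `DEP_CURL_<KEY>` environment variables.
A minimal sketch of a hypothetical dependent crate's build script reading that
value; the crate itself and what it does with the path are assumptions for
illustration, not part of the vendored sources:

    // build.rs of a hypothetical crate that depends on curl-sys.
    use std::env;

    fn main() {
        // DEP_CURL_INCLUDE is set by Cargo because curl-sys declares
        // `links = "curl"` and its build script prints `cargo:include=<path>`.
        if let Ok(include) = env::var("DEP_CURL_INCLUDE") {
            // e.g. hand libcurl's header directory to a C compiler here.
            println!("cargo:warning=libcurl headers at {}", include);
        }
    }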
diff --cc vendor/curl-sys-0.4.8/build.rs
index 000000000,000000000..e9f1894f2
new file mode 100644
--- /dev/null
+++ b/vendor/curl-sys-0.4.8/build.rs
@@@ -1,0 -1,0 +1,426 @@@
++extern crate pkg_config;
++#[cfg(target_env = "msvc")]
++extern crate vcpkg;
++extern crate cc;
++
++#[allow(unused_imports, deprecated)]
++use std::ascii::AsciiExt;
++use std::env;
++use std::ffi::OsString;
++use std::fs;
++use std::path::{PathBuf, Path, Component, Prefix};
++use std::process::Command;
++use std::io::ErrorKind;
++
++macro_rules! t {
++    ($e:expr) => (match $e {
++        Ok(t) => t,
++        Err(e) => panic!("{} returned the error {}", stringify!($e), e),
++    })
++}
++
++fn main() {
++    let target = env::var("TARGET").unwrap();
++    let host = env::var("HOST").unwrap();
++    let src = env::current_dir().unwrap();
++    let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap());
++    let windows = target.contains("windows");
++
++    // OSX and Haiku ship libcurl by default, so we just use that version
++    // unconditionally.
++    if target.contains("apple") || target.contains("haiku") {
++        return println!("cargo:rustc-flags=-l curl");
++    }
++
++    // Illumos/Solaris requires explicit linking with libnsl
++    if target.contains("solaris") {
++        println!("cargo:rustc-flags=-l nsl");
++    }
++
++    // Next, fall back and try to use pkg-config if it's available.
++    if !target.contains("windows") {
++        match pkg_config::find_library("libcurl") {
++            Ok(lib) => {
++                for path in lib.include_paths.iter() {
++                    println!("cargo:include={}", path.display());
++                }
++                return
++            }
++            Err(e) => println!("Couldn't find libcurl from \
++                                pkgconfig ({:?}), compiling it from source...", e),
++        }
++    }
++
++    if try_vcpkg() {
++        return;
++    }
++
++    if !Path::new("curl/.git").exists() {
++        let _ = Command::new("git").args(&["submodule", "update", "--init"])
++                                   .status();
++    }
++
++    println!("cargo:rustc-link-search=native={}/lib", dst.display());
++    println!("cargo:rustc-link-lib=static=curl");
++    println!("cargo:root={}", dst.display());
++    println!("cargo:include={}/include", dst.display());
++    if windows {
++        println!("cargo:rustc-link-lib=ws2_32");
++        println!("cargo:rustc-link-lib=crypt32");
++    }
++
++    // MSVC builds are just totally different
++    if target.contains("msvc") {
++        return build_msvc(&target);
++    }
++
++    let openssl_root = register_dep("OPENSSL");
++    let zlib_root = register_dep("Z");
++    let nghttp2_root = register_dep("NGHTTP2");
++
++    let cfg = cc::Build::new();
++    let compiler = cfg.get_compiler();
++
++    let _ = fs::create_dir(&dst.join("build"));
++
++    let mut cmd = Command::new("sh");
++    let mut cflags = OsString::new();
++    for arg in compiler.args() {
++        cflags.push(arg);
++        cflags.push(" ");
++    }
++
++    // Can't run ./configure directly on msys2 b/c we're handing in
++    // Windows-style paths (those starting with C:\), but it chokes on those.
++    // For that reason we build up a shell script with paths converted to
++    // posix versions hopefully...
++    //
++    // Also apparently the buildbots choke unless we manually set LD, who knows
++    // why?!
++    cmd.env("CC", compiler.path())
++       .env("CFLAGS", cflags)
++       .env("LD", &which("ld").unwrap())
++       .env("VERBOSE", "1")
++       .current_dir(&dst.join("build"))
++       .arg(msys_compatible(&src.join("curl/configure")));
++
++    // For now this build script doesn't support paths with spaces in them. This
++    // is arguably a bug in curl's configure script, but we could also try to
++    // paper over it by using a tmp directory which *doesn't* have spaces in it.
++ // As of now though that's not implemented so just give a nicer error for ++ // the time being. ++ let wants_space_error = windows && ++ (dst.to_str().map(|s| s.contains(" ")).unwrap_or(false) || ++ src.to_str().map(|s| s.contains(" ")).unwrap_or(false)); ++ if wants_space_error { ++ panic!("\n\nunfortunately ./configure of libcurl is known to \ ++ fail if there's a space in the path to the current \ ++ directory\n\n\ ++ there's a space in either\n {}\n {}\nand this will cause the \ ++ build to fail\n\n\ ++ the MSVC build should work with a directory that has \ ++ spaces in it, and it would also work to move this to a \ ++ different directory without spaces\n\n", ++ src.display(), dst.display()) ++ } ++ ++ if windows { ++ cmd.arg("--with-winssl"); ++ } else { ++ cmd.arg("--without-ca-bundle"); ++ cmd.arg("--without-ca-path"); ++ } ++ if let Some(root) = openssl_root { ++ cmd.arg(format!("--with-ssl={}", msys_compatible(&root))); ++ } ++ if let Some(root) = zlib_root { ++ cmd.arg(format!("--with-zlib={}", msys_compatible(&root))); ++ } ++ cmd.arg("--enable-static=yes"); ++ cmd.arg("--enable-shared=no"); ++ match &env::var("PROFILE").unwrap()[..] { ++ "bench" | "release" => { ++ cmd.arg("--enable-optimize"); ++ } ++ _ => { ++ cmd.arg("--enable-debug"); ++ cmd.arg("--disable-optimize"); ++ } ++ } ++ cmd.arg(format!("--prefix={}", msys_compatible(&dst))); ++ ++ if target != host && ++ (!target.contains("windows") || !host.contains("windows")) { ++ // NOTE GNU terminology ++ // BUILD = machine where we are (cross) compiling curl ++ // HOST = machine where the compiled curl will be used ++ // TARGET = only relevant when compiling compilers ++ if target.contains("windows") { ++ // curl's configure can't parse `-windows-` triples when used ++ // as `--host`s. In those cases we use this combination of ++ // `host` and `target` that appears to do the right thing. 
++            cmd.arg(format!("--host={}", host));
++            cmd.arg(format!("--target={}", target));
++        } else {
++            cmd.arg(format!("--build={}", host));
++            cmd.arg(format!("--host={}", target));
++        }
++    }
++
++    if let Some(root) = nghttp2_root {
++        cmd.arg(format!("--with-nghttp2={}", msys_compatible(&root)));
++    } else {
++        cmd.arg("--without-nghttp2");
++    }
++
++    cmd.arg("--without-librtmp");
++    cmd.arg("--without-libidn2");
++    cmd.arg("--without-libssh2");
++    cmd.arg("--without-libpsl");
++    cmd.arg("--disable-ldap");
++    cmd.arg("--disable-ldaps");
++    cmd.arg("--disable-ftp");
++    cmd.arg("--disable-rtsp");
++    cmd.arg("--disable-dict");
++    cmd.arg("--disable-telnet");
++    cmd.arg("--disable-tftp");
++    cmd.arg("--disable-pop3");
++    cmd.arg("--disable-imap");
++    cmd.arg("--disable-smtp");
++    cmd.arg("--disable-gopher");
++    cmd.arg("--disable-manual");
++    cmd.arg("--disable-smb");
++    cmd.arg("--disable-sspi");
++    cmd.arg("--disable-manual");
++    cmd.arg("--disable-unix-sockets");
++    cmd.arg("--disable-versioned-symbols");
++    cmd.arg("--enable-hidden-symbols");
++    cmd.arg("--disable-libcurl-option");
++
++    run(&mut cmd, "sh");
++    run(make()
++        .arg(&format!("-j{}", env::var("NUM_JOBS").unwrap()))
++        .current_dir(&dst.join("build")), "make");
++    run(make()
++        .arg("install")
++        .current_dir(&dst.join("build")), "make");
++}
++
++fn run(cmd: &mut Command, program: &str) {
++    println!("running: {:?}", cmd);
++    let status = match cmd.status() {
++        Ok(status) => status,
++        Err(ref e) if e.kind() == ErrorKind::NotFound => {
++            fail(&format!("failed to execute command: {}\nis `{}` not installed?",
++                          e, program));
++        }
++        Err(e) => fail(&format!("failed to execute command: {}", e)),
++    };
++    if !status.success() {
++        fail(&format!("command did not execute successfully, got: {}", status));
++    }
++}
++
++fn fail(s: &str) -> ! {
++    panic!("\n{}\n\nbuild script failed, must exit now", s)
++}
++
++fn make() -> Command {
++    let cmd = if cfg!(target_os = "freebsd") {"gmake"} else {"make"};
++    let mut cmd = Command::new(cmd);
++    // We're using the MSYS make which doesn't work with the mingw32-make-style
++    // MAKEFLAGS, so remove that from the env if present.
++    if cfg!(windows) {
++        cmd.env_remove("MAKEFLAGS").env_remove("MFLAGS");
++    }
++    return cmd
++}
++
++fn which(cmd: &str) -> Option<PathBuf> {
++    let cmd = format!("{}{}", cmd, env::consts::EXE_SUFFIX);
++    let paths = env::var_os("PATH").unwrap();
++    env::split_paths(&paths).map(|p| p.join(&cmd)).find(|p| {
++        fs::metadata(p).is_ok()
++    })
++}
++
++fn msys_compatible(path: &Path) -> String {
++    let mut path_string = path.to_str().unwrap().to_string();
++    if !cfg!(windows) {
++        return path_string;
++    }
++
++    // Replace e.g. C:\ with /c/
++    if let Component::Prefix(prefix_component) = path.components().next().unwrap() {
++        if let Prefix::Disk(disk) = prefix_component.kind() {
++            let from = format!("{}:\\", disk as char);
++            let to = format!("/{}/", (disk as char).to_ascii_lowercase());
++            path_string = path_string.replace(&from, &to);
++        }
++    }
++    path_string.replace("\\", "/")
++}
++
++fn register_dep(dep: &str) -> Option<PathBuf> {
++    if let Some(s) = env::var_os(&format!("DEP_{}_ROOT", dep)) {
++        prepend("PKG_CONFIG_PATH", Path::new(&s).join("lib/pkgconfig"));
++        return Some(s.into())
++    }
++    if let Some(s) = env::var_os(&format!("DEP_{}_INCLUDE", dep)) {
++        let root = Path::new(&s).parent().unwrap();
++        env::set_var(&format!("DEP_{}_ROOT", dep), root);
++        let path = root.join("lib/pkgconfig");
++        if path.exists() {
++            prepend("PKG_CONFIG_PATH", path);
++            return Some(root.to_path_buf())
++        }
++    }
++
++    return None;
++
++    fn prepend(var: &str, val: PathBuf) {
++        let prefix = env::var(var).unwrap_or(String::new());
++        let mut v = vec![val];
++        v.extend(env::split_paths(&prefix));
++        env::set_var(var, &env::join_paths(v).unwrap());
++    }
++}
++
++fn build_msvc(target: &str) {
++    let cmd = cc::windows_registry::find(target, "nmake.exe");
++    let mut cmd = cmd.unwrap_or(Command::new("nmake.exe"));
++    let src = env::current_dir().unwrap();
++    let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap());
++    let machine = if target.starts_with("x86_64") {
++        "x64"
++    } else if target.starts_with("i686") {
++        "x86"
++    } else {
++        panic!("unknown msvc target: {}", target);
++    };
++
++    t!(fs::create_dir_all(dst.join("include/curl")));
++    t!(fs::create_dir_all(dst.join("lib")));
++
++    drop(fs::remove_dir_all(&dst.join("build")));
++    cp_r(&src.join("curl"), &dst.join("build"));
++    cmd.current_dir(dst.join("build/winbuild"));
++    cmd.arg("/f").arg("Makefile.vc")
++       .arg("MODE=static")
++       .arg("ENABLE_IDN=yes")
++       .arg("DEBUG=no")
++       .arg("GEN_PDB=no")
++       .arg("ENABLE_WINSSL=yes")
++       .arg("ENABLE_SSPI=yes")
++       .arg(format!("MACHINE={}", machine));
++
++    // These env vars are intended for `make` usually, not nmake, so remove them
++    // unconditionally
++    cmd.env_remove("MAKEFLAGS")
++       .env_remove("MFLAGS");
++
++    // While in theory clang-cl can be used it doesn't work because we can't
++    // configure CFLAGS which means cross-compilation to 32-bit doesn't work.
++    // Just require MSVC cl.exe here.
++    cmd.env_remove("CC");
++
++    let features = env::var("CARGO_CFG_TARGET_FEATURE")
++                      .unwrap_or(String::new());
++    if features.contains("crt-static") {
++        cmd.arg("RTLIBCFG=static");
++    }
++
++    if let Some(inc) = env::var_os("DEP_Z_ROOT") {
++        let inc = PathBuf::from(inc);
++        let mut s = OsString::from("WITH_DEVEL=");
++        s.push(&inc);
++        cmd.arg("WITH_ZLIB=static").arg(s);
++
++        // the build system for curl expects this library to be called
++        // zlib_a.lib, so make sure it's named correctly (where libz-sys just
++        // produces zlib.lib)
++        let _ = fs::remove_file(&inc.join("lib/zlib_a.lib"));
++        t!(fs::copy(inc.join("lib/zlib.lib"), inc.join("lib/zlib_a.lib")));
++    }
++    run(&mut cmd, "nmake");
++
++    let name = format!("libcurl-vc-{}-release-static-zlib-static-\
++                        ipv6-sspi-winssl", machine);
++    let libs = dst.join("build/builds").join(name);
++
++    t!(fs::copy(libs.join("lib/libcurl_a.lib"), dst.join("lib/curl.lib")));
++    for f in t!(fs::read_dir(libs.join("include/curl"))) {
++        let path = t!(f).path();
++        let dst = dst.join("include/curl").join(path.file_name().unwrap());
++        t!(fs::copy(path, dst));
++    }
++    t!(fs::remove_dir_all(dst.join("build/builds")));
++    println!("cargo:rustc-link-lib=wldap32");
++    println!("cargo:rustc-link-lib=advapi32");
++    println!("cargo:rustc-link-lib=normaliz");
++}
++
++#[cfg(not(target_env = "msvc"))]
++fn try_vcpkg() -> bool {
++    false
++}
++
++#[cfg(target_env = "msvc")]
++fn try_vcpkg() -> bool {
++
++    // the import library for the dll is called libcurl_imp
++    let mut successful_probe_details =
++        match vcpkg::Config::new().lib_names("libcurl_imp", "libcurl")
++                                  .emit_includes(true).probe("curl") {
++            Ok(details) => Some(details),
++            Err(e) => {
++                println!("first run of vcpkg did not find libcurl: {}", e);
++                None
++            }
++        };
++
++    if successful_probe_details.is_none() {
++        match vcpkg::Config::new().lib_name("libcurl")
++                                  .emit_includes(true).probe("curl") {
++            Ok(details) => successful_probe_details = Some(details),
++            Err(e) => println!("second run of vcpkg did not find libcurl: {}", e),
++        }
++    }
++
++    if successful_probe_details.is_some() {
++        // Found libcurl which depends on openssl, libssh2 and zlib
++        // in a default vcpkg installation. Probe for them
++        // but do not fail if they are not present as we may be working
++        // with a customized vcpkg installation.
++ vcpkg::Config::new() ++ .lib_name("libeay32") ++ .lib_name("ssleay32") ++ .probe("openssl").ok(); ++ ++ vcpkg::probe_package("libssh2").ok(); ++ ++ vcpkg::Config::new() ++ .lib_names("zlib", "zlib1") ++ .probe("zlib").ok(); ++ ++ println!("cargo:rustc-link-lib=crypt32"); ++ println!("cargo:rustc-link-lib=gdi32"); ++ println!("cargo:rustc-link-lib=user32"); ++ println!("cargo:rustc-link-lib=wldap32"); ++ return true; ++ } ++ false ++} ++ ++fn cp_r(src: &Path, dst: &Path) { ++ t!(fs::create_dir(dst)); ++ for e in t!(src.read_dir()).map(|e| t!(e)) { ++ let src = e.path(); ++ let dst = dst.join(e.file_name()); ++ if t!(e.file_type()).is_dir() { ++ cp_r(&src, &dst); ++ } else { ++ t!(fs::copy(&src, &dst)); ++ } ++ } ++} diff --cc vendor/curl-sys-0.4.8/lib.rs index 000000000,000000000..383e3edfc new file mode 100644 --- /dev/null +++ b/vendor/curl-sys-0.4.8/lib.rs @@@ -1,0 -1,0 +1,1059 @@@ ++#![allow(bad_style)] ++#![doc(html_root_url = "https://docs.rs/curl-sys/0.3")] ++ ++extern crate libc; ++extern crate libz_sys; ++#[cfg(all(unix, not(target_os = "macos")))] ++extern crate openssl_sys; ++#[cfg(windows)] ++extern crate winapi; ++ ++use libc::{c_int, c_char, c_uint, c_short, c_long, c_double, c_void, size_t, time_t}; ++use libc::c_ulong; ++ ++#[cfg(unix)] ++pub use libc::fd_set; ++#[cfg(windows)] ++pub use winapi::um::winsock2::fd_set; ++#[cfg(windows)] ++use winapi::shared::ws2def::SOCKADDR; ++ ++#[cfg(target_env = "msvc")] ++#[doc(hidden)] ++pub type __enum_ty = libc::c_int; ++#[cfg(not(target_env = "msvc"))] ++#[doc(hidden)] ++pub type __enum_ty = libc::c_uint; ++ ++pub type CURLINFO = __enum_ty; ++pub type CURLoption = __enum_ty; ++pub type CURLcode = __enum_ty; ++pub type CURLversion = __enum_ty; ++pub type curl_off_t = i64; ++ ++pub enum CURL {} ++ ++#[cfg(unix)] ++pub type curl_socket_t = libc::c_int; ++#[cfg(unix)] ++pub const CURL_SOCKET_BAD: curl_socket_t = -1; ++#[cfg(all(windows, target_pointer_width = "32"))] ++pub type curl_socket_t = libc::c_uint; ++#[cfg(all(windows, target_pointer_width = "64"))] ++pub type curl_socket_t = u64; ++#[cfg(windows)] ++pub const CURL_SOCKET_BAD: curl_socket_t = !0; ++ ++pub enum curl_httppost { ++ // Note that this changed in some versions of libcurl, so we currently don't ++ // bind the fields as they're apparently not stable. 
++ // pub next: *mut curl_httppost, ++ // pub name: *mut c_char, ++ // pub namelength: c_long, ++ // pub contents: *mut c_char, ++ // pub contentslength: c_long, ++ // pub buffer: *mut c_char, ++ // pub bufferlength: c_long, ++ // pub contenttype: *mut c_char, ++ // pub contentheader: *mut curl_slist, ++ // pub more: *mut curl_httppost, ++ // pub flags: c_long, ++ // pub showfilename: *mut c_char, ++ // pub userp: *mut c_void, ++} ++ ++// pub const HTTPPOST_FILENAME: c_long = 1 << 0; ++// pub const HTTPPOST_READFILE: c_long = 1 << 1; ++// pub const HTTPPOST_PTRNAME: c_long = 1 << 2; ++// pub const HTTPPOST_PTRCONTENTS: c_long = 1 << 3; ++// pub const HTTPPOST_BUFFER: c_long = 1 << 4; ++// pub const HTTPPOST_PTRBUFFER: c_long = 1 << 5; ++// pub const HTTPPOST_CALLBACK: c_long = 1 << 6; ++ ++pub type curl_progress_callback = extern fn(*mut c_void, ++ c_double, ++ c_double, ++ c_double, ++ c_double) -> c_int; ++// pub type curl_xferinfo_callback = extern fn(*mut c_void, ++// curl_off_t, ++// curl_off_t, ++// curl_off_t, ++// curl_off_t) -> c_int; ++ ++pub const CURL_WRITEFUNC_PAUSE: size_t = 0x10000001; ++ ++pub type curl_write_callback = extern fn(*mut c_char, ++ size_t, ++ size_t, ++ *mut c_void) -> size_t; ++ ++pub type curlfiletype = __enum_ty; ++pub const CURLFILETYPE_FILE: curlfiletype = 0; ++pub const CURLFILETYPE_DIRECTORY: curlfiletype = 1; ++pub const CURLFILETYPE_SYMLINK: curlfiletype = 2; ++pub const CURLFILETYPE_DEVICE_BLOCK: curlfiletype = 3; ++pub const CURLFILETYPE_DEVICE_CHAR: curlfiletype = 4; ++pub const CURLFILETYPE_NAMEDPIPE: curlfiletype = 5; ++pub const CURLFILETYPE_SOCKET: curlfiletype = 6; ++pub const CURLFILETYPE_DOOR: curlfiletype = 7; ++pub const CURLFILETYPE_UNKNOWN: curlfiletype = 8; ++ ++pub const CURLFINFOFLAG_KNOWN_FILENAME: c_uint = 1 << 0; ++pub const CURLFINFOFLAG_KNOWN_FILETYPE: c_uint = 1 << 1; ++pub const CURLFINFOFLAG_KNOWN_TIME: c_uint = 1 << 2; ++pub const CURLFINFOFLAG_KNOWN_PERM: c_uint = 1 << 3; ++pub const CURLFINFOFLAG_KNOWN_UID: c_uint = 1 << 4; ++pub const CURLFINFOFLAG_KNOWN_GID: c_uint = 1 << 5; ++pub const CURLFINFOFLAG_KNOWN_SIZE: c_uint = 1 << 6; ++pub const CURLFINFOFLAG_KNOWN_HLINKCOUNT: c_uint = 1 << 7; ++ ++#[repr(C)] ++pub struct curl_fileinfo { ++ pub filename: *mut c_char, ++ pub filetype: curlfiletype, ++ pub time: time_t, ++ pub perm: c_uint, ++ pub uid: c_int, ++ pub gid: c_int, ++ pub size: curl_off_t, ++ pub hardlinks: c_long, ++ ++ pub strings_time: *mut c_char, ++ pub strings_perm: *mut c_char, ++ pub strings_user: *mut c_char, ++ pub strings_group: *mut c_char, ++ pub strings_target: *mut c_char, ++ ++ pub flags: c_uint, ++ pub b_data: *mut c_char, ++ pub b_size: size_t, ++ pub b_used: size_t, ++} ++ ++pub const CURL_CHUNK_BGN_FUNC_OK: c_long = 0; ++pub const CURL_CHUNK_BGN_FUNC_FAIL: c_long = 1; ++pub const CURL_CHUNK_BGN_FUNC_SKIP: c_long = 2; ++pub type curl_chunk_bgn_callback = extern fn(*const c_void, ++ *mut c_void, ++ c_int) -> c_long; ++ ++pub const CURL_CHUNK_END_FUNC_OK: c_long = 0; ++pub const CURL_CHUNK_END_FUNC_FAIL: c_long = 1; ++pub type curl_chunk_end_callback = extern fn(*mut c_void) -> c_long; ++ ++pub const CURL_FNMATCHFUNC_MATCH: c_int = 0; ++pub const CURL_FNMATCHFUNC_NOMATCH: c_int = 1; ++pub const CURL_FNMATCHFUNC_FAIL: c_int = 2; ++pub type curl_fnmatch_callback = extern fn(*mut c_void, ++ *const c_char, ++ *const c_char) -> c_int; ++ ++pub const CURL_SEEKFUNC_OK: c_int = 0; ++pub const CURL_SEEKFUNC_FAIL: c_int = 1; ++pub const CURL_SEEKFUNC_CANTSEEK: c_int = 2; ++pub type curl_seek_callback = extern 
fn(*mut c_void, ++ curl_off_t, ++ c_int) -> c_int; ++ ++pub const CURL_READFUNC_ABORT: size_t = 0x10000000; ++pub const CURL_READFUNC_PAUSE: size_t = 0x10000001; ++pub type curl_read_callback = extern fn(*mut c_char, ++ size_t, ++ size_t, ++ *mut c_void) -> size_t; ++ ++// pub const CURL_SOCKOPT_OK: c_int = 0; ++// pub const CURL_SOCKOPT_ERROR: c_int = 1; ++// pub const CURL_SOCKOPT_ALREADY_CONNECTED: c_int = 2; ++// pub type curl_sockopt_callback = extern fn(*mut c_void, ++// curl_socket_t, ++// curlsocktype) -> c_int; ++ ++pub type curlioerr = __enum_ty; ++pub const CURLIOE_OK: curlioerr = 0; ++pub const CURLIOE_UNKNOWNCMD: curlioerr = 1; ++pub const CURLIOE_FAILRESTART: curlioerr = 2; ++ ++pub type curliocmd = __enum_ty; ++pub const CURLIOCMD_NOP: curliocmd = 0; ++pub const CURLIOCMD_RESTARTREAD: curliocmd = 1; ++ ++pub type curl_ioctl_callback = extern fn(*mut CURL, c_int, *mut c_void) -> curlioerr; ++ ++pub type curl_malloc_callback = extern fn(size_t) -> *mut c_void; ++pub type curl_free_callback = extern fn(*mut c_void); ++pub type curl_realloc_callback = extern fn(*mut c_void, size_t) -> *mut c_void; ++pub type curl_strdup_callback = extern fn(*const c_char) -> *mut c_char; ++pub type curl_calloc_callback = extern fn(size_t, size_t) -> *mut c_void; ++ ++pub type curl_infotype = __enum_ty; ++pub const CURLINFO_TEXT: curl_infotype = 0; ++pub const CURLINFO_HEADER_IN: curl_infotype = 1; ++pub const CURLINFO_HEADER_OUT: curl_infotype = 2; ++pub const CURLINFO_DATA_IN: curl_infotype = 3; ++pub const CURLINFO_DATA_OUT: curl_infotype = 4; ++pub const CURLINFO_SSL_DATA_IN: curl_infotype = 5; ++pub const CURLINFO_SSL_DATA_OUT: curl_infotype = 6; ++ ++pub type curl_debug_callback = extern fn(*mut CURL, ++ curl_infotype, ++ *mut c_char, ++ size_t, ++ *mut c_void) -> c_int; ++ ++pub const CURLE_OK: CURLcode = 0; ++pub const CURLE_UNSUPPORTED_PROTOCOL: CURLcode = 1; ++pub const CURLE_FAILED_INIT: CURLcode = 2; ++pub const CURLE_URL_MALFORMAT: CURLcode = 3; ++// pub const CURLE_NOT_BUILT_IN: CURLcode = 4; ++pub const CURLE_COULDNT_RESOLVE_PROXY: CURLcode = 5; ++pub const CURLE_COULDNT_RESOLVE_HOST: CURLcode = 6; ++pub const CURLE_COULDNT_CONNECT: CURLcode = 7; ++pub const CURLE_FTP_WEIRD_SERVER_REPLY: CURLcode = 8; ++pub const CURLE_REMOTE_ACCESS_DENIED: CURLcode = 9; ++// pub const CURLE_FTP_ACCEPT_FAILED: CURLcode = 10; ++pub const CURLE_FTP_WEIRD_PASS_REPLY: CURLcode = 11; ++// pub const CURLE_FTP_ACCEPT_TIMEOUT: CURLcode = 12; ++pub const CURLE_FTP_WEIRD_PASV_REPLY: CURLcode = 13; ++pub const CURLE_FTP_WEIRD_227_FORMAT: CURLcode = 14; ++pub const CURLE_FTP_CANT_GET_HOST: CURLcode = 15; ++pub const CURLE_OBSOLETE16: CURLcode = 16; ++pub const CURLE_FTP_COULDNT_SET_TYPE: CURLcode = 17; ++pub const CURLE_PARTIAL_FILE: CURLcode = 18; ++pub const CURLE_FTP_COULDNT_RETR_FILE: CURLcode = 19; ++pub const CURLE_OBSOLETE20: CURLcode = 20; ++pub const CURLE_QUOTE_ERROR: CURLcode = 21; ++pub const CURLE_HTTP_RETURNED_ERROR: CURLcode = 22; ++pub const CURLE_WRITE_ERROR: CURLcode = 23; ++pub const CURLE_OBSOLETE24: CURLcode = 24; ++pub const CURLE_UPLOAD_FAILED: CURLcode = 25; ++pub const CURLE_READ_ERROR: CURLcode = 26; ++pub const CURLE_OUT_OF_MEMORY: CURLcode = 27; ++pub const CURLE_OPERATION_TIMEDOUT: CURLcode = 28; ++pub const CURLE_OBSOLETE29: CURLcode = 29; ++pub const CURLE_FTP_PORT_FAILED: CURLcode = 30; ++pub const CURLE_FTP_COULDNT_USE_REST: CURLcode = 31; ++pub const CURLE_OBSOLETE32: CURLcode = 32; ++pub const CURLE_RANGE_ERROR: CURLcode = 33; ++pub const CURLE_HTTP_POST_ERROR: CURLcode = 34; 
++pub const CURLE_SSL_CONNECT_ERROR: CURLcode = 35; ++pub const CURLE_BAD_DOWNLOAD_RESUME: CURLcode = 36; ++pub const CURLE_FILE_COULDNT_READ_FILE: CURLcode = 37; ++pub const CURLE_LDAP_CANNOT_BIND: CURLcode = 38; ++pub const CURLE_LDAP_SEARCH_FAILED: CURLcode = 39; ++pub const CURLE_OBSOLETE40: CURLcode = 40; ++pub const CURLE_FUNCTION_NOT_FOUND: CURLcode = 41; ++pub const CURLE_ABORTED_BY_CALLBACK: CURLcode = 42; ++pub const CURLE_BAD_FUNCTION_ARGUMENT: CURLcode = 43; ++pub const CURLE_OBSOLETE44: CURLcode = 44; ++pub const CURLE_INTERFACE_FAILED: CURLcode = 45; ++pub const CURLE_OBSOLETE46: CURLcode = 46; ++pub const CURLE_TOO_MANY_REDIRECTS : CURLcode = 47; ++pub const CURLE_UNKNOWN_OPTION: CURLcode = 48; ++pub const CURLE_TELNET_OPTION_SYNTAX : CURLcode = 49; ++pub const CURLE_OBSOLETE50: CURLcode = 50; ++pub const CURLE_PEER_FAILED_VERIFICATION: CURLcode = 51; ++pub const CURLE_GOT_NOTHING: CURLcode = 52; ++pub const CURLE_SSL_ENGINE_NOTFOUND: CURLcode = 53; ++pub const CURLE_SSL_ENGINE_SETFAILED: CURLcode = 54; ++pub const CURLE_SEND_ERROR: CURLcode = 55; ++pub const CURLE_RECV_ERROR: CURLcode = 56; ++pub const CURLE_OBSOLETE57: CURLcode = 57; ++pub const CURLE_SSL_CERTPROBLEM: CURLcode = 58; ++pub const CURLE_SSL_CIPHER: CURLcode = 59; ++pub const CURLE_SSL_CACERT: CURLcode = 60; ++pub const CURLE_BAD_CONTENT_ENCODING: CURLcode = 61; ++pub const CURLE_LDAP_INVALID_URL: CURLcode = 62; ++pub const CURLE_FILESIZE_EXCEEDED: CURLcode = 63; ++pub const CURLE_USE_SSL_FAILED: CURLcode = 64; ++pub const CURLE_SEND_FAIL_REWIND: CURLcode = 65; ++pub const CURLE_SSL_ENGINE_INITFAILED: CURLcode = 66; ++pub const CURLE_LOGIN_DENIED: CURLcode = 67; ++pub const CURLE_TFTP_NOTFOUND: CURLcode = 68; ++pub const CURLE_TFTP_PERM: CURLcode = 69; ++pub const CURLE_REMOTE_DISK_FULL: CURLcode = 70; ++pub const CURLE_TFTP_ILLEGAL: CURLcode = 71; ++pub const CURLE_TFTP_UNKNOWNID: CURLcode = 72; ++pub const CURLE_REMOTE_FILE_EXISTS: CURLcode = 73; ++pub const CURLE_TFTP_NOSUCHUSER: CURLcode = 74; ++pub const CURLE_CONV_FAILED: CURLcode = 75; ++pub const CURLE_CONV_REQD: CURLcode = 76; ++pub const CURLE_SSL_CACERT_BADFILE: CURLcode = 77; ++pub const CURLE_REMOTE_FILE_NOT_FOUND: CURLcode = 78; ++pub const CURLE_SSH: CURLcode = 79; ++pub const CURLE_SSL_SHUTDOWN_FAILED: CURLcode = 80; ++pub const CURLE_AGAIN: CURLcode = 81; ++pub const CURLE_SSL_CRL_BADFILE: CURLcode = 82; ++pub const CURLE_SSL_ISSUER_ERROR: CURLcode = 83; ++pub const CURLE_FTP_PRET_FAILED: CURLcode = 84; ++pub const CURLE_RTSP_CSEQ_ERROR: CURLcode = 85; ++pub const CURLE_RTSP_SESSION_ERROR: CURLcode = 86; ++pub const CURLE_FTP_BAD_FILE_LIST: CURLcode = 87; ++pub const CURLE_CHUNK_FAILED: CURLcode = 88; ++// pub const CURLE_NO_CONNECTION_AVAILABLE: CURLcode = 89; ++ ++pub type curl_conv_callback = extern fn(*mut c_char, size_t) -> CURLcode; ++pub type curl_ssl_ctx_callback = extern fn(*mut CURL, ++ *mut c_void, ++ *mut c_void) -> CURLcode; ++ ++pub type curl_proxytype = __enum_ty; ++pub const CURLPROXY_HTTP: curl_proxytype = 0; ++pub const CURLPROXY_HTTP_1_0: curl_proxytype = 1; ++pub const CURLPROXY_SOCKS4: curl_proxytype = 4; ++pub const CURLPROXY_SOCKS5: curl_proxytype = 5; ++pub const CURLPROXY_SOCKS4A: curl_proxytype = 6; ++pub const CURLPROXY_SOCKS5_HOSTNAME: curl_proxytype = 7; ++ ++pub const CURLAUTH_NONE: c_ulong = 0; ++pub const CURLAUTH_BASIC: c_ulong = 1 << 0; ++pub const CURLAUTH_DIGEST: c_ulong = 1 << 1; ++pub const CURLAUTH_GSSNEGOTIATE: c_ulong = 1 << 2; ++pub const CURLAUTH_NTLM: c_ulong = 1 << 3; ++pub const CURLAUTH_DIGEST_IE: 
c_ulong = 1 << 4; ++pub const CURLAUTH_NTLM_WB: c_ulong = 1 << 5; ++// pub const CURLAUTH_ONLY: c_ulong = 1 << 31; ++pub const CURLAUTH_ANY: c_ulong = !CURLAUTH_DIGEST_IE; ++pub const CURLAUTH_ANYSAFE: c_ulong = !(CURLAUTH_BASIC | CURLAUTH_DIGEST_IE); ++ ++// pub const CURLSSH_AUTH_ANY: c_ulong = !0; ++// pub const CURLSSH_AUTH_NONE: c_ulong = 0; ++// pub const CURLSSH_AUTH_PUBLICKEY: c_ulong = 1 << 0; ++// pub const CURLSSH_AUTH_PASSWORD: c_ulong = 1 << 1; ++// pub const CURLSSH_AUTH_HOST: c_ulong = 1 << 2; ++// pub const CURLSSH_AUTH_KEYBOARD: c_ulong = 1 << 3; ++// pub const CURLSSH_AUTH_AGENT: c_ulong = 1 << 4; ++// pub const CURLSSH_AUTH_DEFAULT: c_ulong = CURLSSH_AUTH_ANY; ++ ++pub const CURLGSSAPI_DELEGATION_NONE: c_ulong = 0; ++pub const CURLGSSAPI_DELEGATION_POLICY_FLAG: c_ulong = 1 << 0; ++pub const CURLGSSAPI_DELEGATION_FLAG: c_ulong = 1 << 1; ++ ++// pub type curl_khtype = __enum_ty; ++// pub const CURLKHTYPE_UNKNOWN: curl_khtype = 0; ++// pub const CURLKHTYPE_RSA1: curl_khtype = 1; ++// pub const CURLKHTYPE_RSA: curl_khtype = 2; ++// pub const CURLKHTYPE_DSS: curl_khtype = 3; ++ ++// #[repr(C)] ++// pub struct curl_khkey { ++// pub key: *const c_char, ++// pub len: size_t, ++// pub keytype: curl_khtype, ++// } ++ ++// pub type curl_khstat = __enum_ty; ++// pub const CURLKHSTAT_FINE_ADD_TO_FILE: curl_khstat = 0; ++// pub const CURLKHSTAT_FINE: curl_khstat = 1; ++// pub const CURLKHSTAT_REJECT: curl_khstat = 2; ++// pub const CURLKHSTAT_DEFER: curl_khstat = 3; ++// ++// pub type curl_khmatch = __enum_ty; ++// pub const CURLKHMATCH_OK: curl_khmatch = 0; ++// pub const CURLKHMATCH_MISMATCH: curl_khmatch = 1; ++// pub const CURLKHMATCH_MISSING: curl_khmatch = 2; ++ ++// pub type curl_sshkeycallback = extern fn(*mut CURL, ++// *const curl_khkey, ++// *const curl_khkey, ++// curl_khmatch, ++// *mut c_void) -> c_int; ++ ++pub const CURL_NETRC_IGNORED: c_ulong = 0; ++pub const CURL_NETRC_OPTIONAL: c_ulong = 1; ++pub const CURL_NETRC_REQUIRED: c_ulong = 2; ++ ++pub type curl_usessl = __enum_ty; ++pub const CURLUSESSL_NONE: curl_usessl = 0; ++pub const CURLUSESSL_TRY: curl_usessl = 1; ++pub const CURLUSESSL_CONTROL: curl_usessl = 2; ++pub const CURLUSESSL_ALL: curl_usessl = 3; ++ ++pub const CURLPROTO_HTTP: c_int = 1 << 0; ++pub const CURLPROTO_HTTPS: c_int = 1 << 1; ++pub const CURLPROTO_FILE: c_int = 1 << 10; ++ ++pub const CURLOPTTYPE_LONG: CURLoption = 0; ++pub const CURLOPTTYPE_OBJECTPOINT: CURLoption = 10_000; ++pub const CURLOPTTYPE_FUNCTIONPOINT: CURLoption = 20_000; ++pub const CURLOPTTYPE_OFF_T: CURLoption = 30_000; ++ ++pub const CURLOPT_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 1; ++pub const CURLOPT_URL: CURLoption = CURLOPTTYPE_OBJECTPOINT + 2; ++pub const CURLOPT_PORT: CURLoption = CURLOPTTYPE_LONG + 3; ++pub const CURLOPT_PROXY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 4; ++pub const CURLOPT_USERPWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 5; ++pub const CURLOPT_PROXYUSERPWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 6; ++pub const CURLOPT_RANGE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 7; ++pub const CURLOPT_INFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 9; ++pub const CURLOPT_ERRORBUFFER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 10; ++pub const CURLOPT_WRITEFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 11; ++pub const CURLOPT_READFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 12; ++pub const CURLOPT_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 13; ++pub const CURLOPT_INFILESIZE: CURLoption = CURLOPTTYPE_LONG + 14; ++pub const CURLOPT_POSTFIELDS: CURLoption = 
CURLOPTTYPE_OBJECTPOINT + 15; ++pub const CURLOPT_REFERER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 16; ++pub const CURLOPT_FTPPORT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 17; ++pub const CURLOPT_USERAGENT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 18; ++pub const CURLOPT_LOW_SPEED_LIMIT: CURLoption = CURLOPTTYPE_LONG + 19; ++pub const CURLOPT_LOW_SPEED_TIME: CURLoption = CURLOPTTYPE_LONG + 20; ++pub const CURLOPT_RESUME_FROM: CURLoption = CURLOPTTYPE_LONG + 21; ++pub const CURLOPT_COOKIE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 22; ++pub const CURLOPT_HTTPHEADER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 23; ++pub const CURLOPT_HTTPPOST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 24; ++pub const CURLOPT_SSLCERT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 25; ++pub const CURLOPT_KEYPASSWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 26; ++pub const CURLOPT_CRLF: CURLoption = CURLOPTTYPE_LONG + 27; ++pub const CURLOPT_QUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 28; ++pub const CURLOPT_WRITEHEADER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 29; ++pub const CURLOPT_COOKIEFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 31; ++pub const CURLOPT_SSLVERSION: CURLoption = CURLOPTTYPE_LONG + 32; ++pub const CURLOPT_TIMECONDITION: CURLoption = CURLOPTTYPE_LONG + 33; ++pub const CURLOPT_TIMEVALUE: CURLoption = CURLOPTTYPE_LONG + 34; ++pub const CURLOPT_CUSTOMREQUEST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 36; ++pub const CURLOPT_STDERR: CURLoption = CURLOPTTYPE_OBJECTPOINT + 37; ++pub const CURLOPT_POSTQUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 39; ++pub const CURLOPT_WRITEINFO: CURLoption = CURLOPTTYPE_OBJECTPOINT + 40; ++pub const CURLOPT_VERBOSE: CURLoption = CURLOPTTYPE_LONG + 41; ++pub const CURLOPT_HEADER: CURLoption = CURLOPTTYPE_LONG + 42; ++pub const CURLOPT_NOPROGRESS: CURLoption = CURLOPTTYPE_LONG + 43; ++pub const CURLOPT_NOBODY: CURLoption = CURLOPTTYPE_LONG + 44; ++pub const CURLOPT_FAILONERROR: CURLoption = CURLOPTTYPE_LONG + 45; ++pub const CURLOPT_UPLOAD: CURLoption = CURLOPTTYPE_LONG + 46; ++pub const CURLOPT_POST: CURLoption = CURLOPTTYPE_LONG + 47; ++pub const CURLOPT_DIRLISTONLY: CURLoption = CURLOPTTYPE_LONG + 48; ++pub const CURLOPT_APPEND: CURLoption = CURLOPTTYPE_LONG + 50; ++pub const CURLOPT_NETRC: CURLoption = CURLOPTTYPE_LONG + 51; ++pub const CURLOPT_FOLLOWLOCATION: CURLoption = CURLOPTTYPE_LONG + 52; ++pub const CURLOPT_TRANSFERTEXT: CURLoption = CURLOPTTYPE_LONG + 53; ++pub const CURLOPT_PUT: CURLoption = CURLOPTTYPE_LONG + 54; ++pub const CURLOPT_PROGRESSFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 56; ++pub const CURLOPT_PROGRESSDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 57; ++pub const CURLOPT_AUTOREFERER: CURLoption = CURLOPTTYPE_LONG + 58; ++pub const CURLOPT_PROXYPORT: CURLoption = CURLOPTTYPE_LONG + 59; ++pub const CURLOPT_POSTFIELDSIZE: CURLoption = CURLOPTTYPE_LONG + 60; ++pub const CURLOPT_HTTPPROXYTUNNEL: CURLoption = CURLOPTTYPE_LONG + 61; ++pub const CURLOPT_INTERFACE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 62; ++pub const CURLOPT_KRBLEVEL: CURLoption = CURLOPTTYPE_OBJECTPOINT + 63; ++pub const CURLOPT_SSL_VERIFYPEER: CURLoption = CURLOPTTYPE_LONG + 64; ++pub const CURLOPT_CAINFO: CURLoption = CURLOPTTYPE_OBJECTPOINT + 65; ++pub const CURLOPT_MAXREDIRS: CURLoption = CURLOPTTYPE_LONG + 68; ++pub const CURLOPT_FILETIME: CURLoption = CURLOPTTYPE_LONG + 69; ++pub const CURLOPT_TELNETOPTIONS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 70; ++pub const CURLOPT_MAXCONNECTS: CURLoption = CURLOPTTYPE_LONG + 71; ++pub const CURLOPT_CLOSEPOLICY: CURLoption = CURLOPTTYPE_LONG + 72; ++pub 
const CURLOPT_FRESH_CONNECT: CURLoption = CURLOPTTYPE_LONG + 74; ++pub const CURLOPT_FORBID_REUSE: CURLoption = CURLOPTTYPE_LONG + 75; ++pub const CURLOPT_RANDOM_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 76; ++pub const CURLOPT_EGDSOCKET: CURLoption = CURLOPTTYPE_OBJECTPOINT + 77; ++pub const CURLOPT_CONNECTTIMEOUT: CURLoption = CURLOPTTYPE_LONG + 78; ++pub const CURLOPT_HEADERFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 79; ++pub const CURLOPT_HTTPGET: CURLoption = CURLOPTTYPE_LONG + 80; ++pub const CURLOPT_SSL_VERIFYHOST: CURLoption = CURLOPTTYPE_LONG + 81; ++pub const CURLOPT_COOKIEJAR: CURLoption = CURLOPTTYPE_OBJECTPOINT + 82; ++pub const CURLOPT_SSL_CIPHER_LIST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 83; ++pub const CURLOPT_HTTP_VERSION: CURLoption = CURLOPTTYPE_LONG + 84; ++pub const CURLOPT_FTP_USE_EPSV: CURLoption = CURLOPTTYPE_LONG + 85; ++pub const CURLOPT_SSLCERTTYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 86; ++pub const CURLOPT_SSLKEY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 87; ++pub const CURLOPT_SSLKEYTYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 88; ++pub const CURLOPT_SSLENGINE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 89; ++pub const CURLOPT_SSLENGINE_DEFAULT: CURLoption = CURLOPTTYPE_LONG + 90; ++pub const CURLOPT_DNS_USE_GLOBAL_CACHE: CURLoption = CURLOPTTYPE_LONG + 91; ++pub const CURLOPT_DNS_CACHE_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 92; ++pub const CURLOPT_PREQUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 93; ++pub const CURLOPT_DEBUGFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 94; ++pub const CURLOPT_DEBUGDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 95; ++pub const CURLOPT_COOKIESESSION: CURLoption = CURLOPTTYPE_LONG + 96; ++pub const CURLOPT_CAPATH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 97; ++pub const CURLOPT_BUFFERSIZE: CURLoption = CURLOPTTYPE_LONG + 98; ++pub const CURLOPT_NOSIGNAL: CURLoption = CURLOPTTYPE_LONG + 99; ++pub const CURLOPT_SHARE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 100; ++pub const CURLOPT_PROXYTYPE: CURLoption = CURLOPTTYPE_LONG + 101; ++pub const CURLOPT_ACCEPT_ENCODING: CURLoption = CURLOPTTYPE_OBJECTPOINT + 102; ++pub const CURLOPT_PRIVATE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 103; ++pub const CURLOPT_HTTP200ALIASES: CURLoption = CURLOPTTYPE_OBJECTPOINT + 104; ++pub const CURLOPT_UNRESTRICTED_AUTH: CURLoption = CURLOPTTYPE_LONG + 105; ++pub const CURLOPT_FTP_USE_EPRT: CURLoption = CURLOPTTYPE_LONG + 106; ++pub const CURLOPT_HTTPAUTH: CURLoption = CURLOPTTYPE_LONG + 107; ++pub const CURLOPT_SSL_CTX_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 108; ++pub const CURLOPT_SSL_CTX_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 109; ++pub const CURLOPT_FTP_CREATE_MISSING_DIRS: CURLoption = CURLOPTTYPE_LONG + 110; ++pub const CURLOPT_PROXYAUTH: CURLoption = CURLOPTTYPE_LONG + 111; ++pub const CURLOPT_FTP_RESPONSE_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 112; ++pub const CURLOPT_IPRESOLVE: CURLoption = CURLOPTTYPE_LONG + 113; ++pub const CURLOPT_MAXFILESIZE: CURLoption = CURLOPTTYPE_LONG + 114; ++pub const CURLOPT_INFILESIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 115; ++pub const CURLOPT_RESUME_FROM_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 116; ++pub const CURLOPT_MAXFILESIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 117; ++pub const CURLOPT_NETRC_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 118; ++pub const CURLOPT_USE_SSL: CURLoption = CURLOPTTYPE_LONG + 119; ++pub const CURLOPT_POSTFIELDSIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 120; ++pub const CURLOPT_TCP_NODELAY: CURLoption = CURLOPTTYPE_LONG + 121; ++pub const 
CURLOPT_FTPSSLAUTH: CURLoption = CURLOPTTYPE_LONG + 129; ++pub const CURLOPT_IOCTLFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 130; ++pub const CURLOPT_IOCTLDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 131; ++pub const CURLOPT_FTP_ACCOUNT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 134; ++pub const CURLOPT_COOKIELIST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 135; ++pub const CURLOPT_IGNORE_CONTENT_LENGTH: CURLoption = CURLOPTTYPE_LONG + 136; ++pub const CURLOPT_FTP_SKIP_PASV_IP: CURLoption = CURLOPTTYPE_LONG + 137; ++pub const CURLOPT_FTP_FILEMETHOD: CURLoption = CURLOPTTYPE_LONG + 138; ++pub const CURLOPT_LOCALPORT: CURLoption = CURLOPTTYPE_LONG + 139; ++pub const CURLOPT_LOCALPORTRANGE: CURLoption = CURLOPTTYPE_LONG + 140; ++pub const CURLOPT_CONNECT_ONLY: CURLoption = CURLOPTTYPE_LONG + 141; ++pub const CURLOPT_CONV_FROM_NETWORK_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 142; ++pub const CURLOPT_CONV_TO_NETWORK_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 143; ++pub const CURLOPT_CONV_FROM_UTF8_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 144; ++pub const CURLOPT_MAX_SEND_SPEED_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 145; ++pub const CURLOPT_MAX_RECV_SPEED_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 146; ++pub const CURLOPT_FTP_ALTERNATIVE_TO_USER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 147; ++pub const CURLOPT_SOCKOPTFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 148; ++pub const CURLOPT_SOCKOPTDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 149; ++pub const CURLOPT_SSL_SESSIONID_CACHE: CURLoption = CURLOPTTYPE_LONG + 150; ++pub const CURLOPT_SSH_AUTH_TYPES: CURLoption = CURLOPTTYPE_LONG + 151; ++pub const CURLOPT_SSH_PUBLIC_KEYFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 152; ++pub const CURLOPT_SSH_PRIVATE_KEYFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 153; ++pub const CURLOPT_FTP_SSL_CCC: CURLoption = CURLOPTTYPE_LONG + 154; ++pub const CURLOPT_TIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 155; ++pub const CURLOPT_CONNECTTIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 156; ++pub const CURLOPT_HTTP_TRANSFER_DECODING: CURLoption = CURLOPTTYPE_LONG + 157; ++pub const CURLOPT_HTTP_CONTENT_DECODING: CURLoption = CURLOPTTYPE_LONG + 158; ++pub const CURLOPT_NEW_FILE_PERMS: CURLoption = CURLOPTTYPE_LONG + 159; ++pub const CURLOPT_NEW_DIRECTORY_PERMS: CURLoption = CURLOPTTYPE_LONG + 160; ++pub const CURLOPT_POSTREDIR: CURLoption = CURLOPTTYPE_LONG + 161; ++pub const CURLOPT_SSH_HOST_PUBLIC_KEY_MD5: CURLoption = CURLOPTTYPE_OBJECTPOINT + 162; ++pub const CURLOPT_OPENSOCKETFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 163; ++pub const CURLOPT_OPENSOCKETDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 164; ++pub const CURLOPT_COPYPOSTFIELDS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 165; ++pub const CURLOPT_PROXY_TRANSFER_MODE: CURLoption = CURLOPTTYPE_LONG + 166; ++pub const CURLOPT_SEEKFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 167; ++pub const CURLOPT_SEEKDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 168; ++pub const CURLOPT_CRLFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 169; ++pub const CURLOPT_ISSUERCERT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 170; ++pub const CURLOPT_ADDRESS_SCOPE: CURLoption = CURLOPTTYPE_LONG + 171; ++pub const CURLOPT_CERTINFO: CURLoption = CURLOPTTYPE_LONG + 172; ++pub const CURLOPT_USERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 173; ++pub const CURLOPT_PASSWORD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 174; ++pub const CURLOPT_PROXYUSERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 175; ++pub const CURLOPT_PROXYPASSWORD: CURLoption = 
CURLOPTTYPE_OBJECTPOINT + 176; ++pub const CURLOPT_NOPROXY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 177; ++pub const CURLOPT_TFTP_BLKSIZE: CURLoption = CURLOPTTYPE_LONG + 178; ++pub const CURLOPT_SOCKS5_GSSAPI_SERVICE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 179; ++pub const CURLOPT_SOCKS5_GSSAPI_NEC: CURLoption = CURLOPTTYPE_LONG + 180; ++pub const CURLOPT_PROTOCOLS: CURLoption = CURLOPTTYPE_LONG + 181; ++pub const CURLOPT_REDIR_PROTOCOLS: CURLoption = CURLOPTTYPE_LONG + 182; ++pub const CURLOPT_SSH_KNOWNHOSTS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 183; ++pub const CURLOPT_SSH_KEYFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 184; ++pub const CURLOPT_SSH_KEYDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 185; ++pub const CURLOPT_MAIL_FROM: CURLoption = CURLOPTTYPE_OBJECTPOINT + 186; ++pub const CURLOPT_MAIL_RCPT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 187; ++pub const CURLOPT_FTP_USE_PRET: CURLoption = CURLOPTTYPE_LONG + 188; ++pub const CURLOPT_RTSP_REQUEST: CURLoption = CURLOPTTYPE_LONG + 189; ++pub const CURLOPT_RTSP_SESSION_ID: CURLoption = CURLOPTTYPE_OBJECTPOINT + 190; ++pub const CURLOPT_RTSP_STREAM_URI: CURLoption = CURLOPTTYPE_OBJECTPOINT + 191; ++pub const CURLOPT_RTSP_TRANSPORT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 192; ++pub const CURLOPT_RTSP_CLIENT_CSEQ: CURLoption = CURLOPTTYPE_LONG + 193; ++pub const CURLOPT_RTSP_SERVER_CSEQ: CURLoption = CURLOPTTYPE_LONG + 194; ++pub const CURLOPT_INTERLEAVEDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 195; ++pub const CURLOPT_INTERLEAVEFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 196; ++pub const CURLOPT_WILDCARDMATCH: CURLoption = CURLOPTTYPE_LONG + 197; ++pub const CURLOPT_CHUNK_BGN_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 198; ++pub const CURLOPT_CHUNK_END_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 199; ++pub const CURLOPT_FNMATCH_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 200; ++pub const CURLOPT_CHUNK_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 201; ++pub const CURLOPT_FNMATCH_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 202; ++pub const CURLOPT_RESOLVE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 203; ++pub const CURLOPT_TLSAUTH_USERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 204; ++pub const CURLOPT_TLSAUTH_PASSWORD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 205; ++pub const CURLOPT_TLSAUTH_TYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 206; ++pub const CURLOPT_TRANSFER_ENCODING: CURLoption = CURLOPTTYPE_LONG + 207; ++pub const CURLOPT_CLOSESOCKETFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 208; ++pub const CURLOPT_CLOSESOCKETDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 209; ++pub const CURLOPT_GSSAPI_DELEGATION: CURLoption = CURLOPTTYPE_LONG + 210; ++// pub const CURLOPT_DNS_SERVERS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 211; ++// pub const CURLOPT_ACCEPTTIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 212; ++pub const CURLOPT_TCP_KEEPALIVE: CURLoption = CURLOPTTYPE_LONG + 213; ++pub const CURLOPT_TCP_KEEPIDLE: CURLoption = CURLOPTTYPE_LONG + 214; ++pub const CURLOPT_TCP_KEEPINTVL: CURLoption = CURLOPTTYPE_LONG + 215; ++pub const CURLOPT_SSL_OPTIONS: CURLoption = CURLOPTTYPE_LONG + 216; ++// pub const CURLOPT_MAIL_AUTH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 217; ++// pub const CURLOPT_SASL_IR: CURLoption = CURLOPTTYPE_LONG + 218; ++// pub const CURLOPT_XFERINFOFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 219; ++// pub const CURLOPT_XOAUTH2_BEARER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 220; ++// pub const CURLOPT_DNS_INTERFACE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 221; ++// pub const 
CURLOPT_DNS_LOCAL_IP4: CURLoption = CURLOPTTYPE_OBJECTPOINT + 222; ++// pub const CURLOPT_DNS_LOCAL_IP6: CURLoption = CURLOPTTYPE_OBJECTPOINT + 223; ++// pub const CURLOPT_LOGIN_OPTIONS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 224; ++pub const CURLOPT_UNIX_SOCKET_PATH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 231; ++ ++pub const CURL_IPRESOLVE_WHATEVER: c_int = 0; ++pub const CURL_IPRESOLVE_V4: c_int = 1; ++pub const CURL_IPRESOLVE_V6: c_int = 2; ++ ++pub const CURLSSLOPT_ALLOW_BEAST: c_long = 1 << 0; ++pub const CURLSSLOPT_NO_REVOKE: c_long = 1 << 1; ++ ++/// These enums are for use with the CURLOPT_HTTP_VERSION option. ++/// ++/// Setting this means we don't care, and that we'd like the library to choose ++/// the best possible for us! ++pub const CURL_HTTP_VERSION_NONE: c_int = 0; ++/// Please use HTTP 1.0 in the request ++pub const CURL_HTTP_VERSION_1_0: c_int = 1; ++/// Please use HTTP 1.1 in the request ++pub const CURL_HTTP_VERSION_1_1: c_int = 2; ++/// Please use HTTP 2 in the request ++/// (Added in CURL 7.33.0) ++pub const CURL_HTTP_VERSION_2_0: c_int = 3; ++/// Use version 2 for HTTPS, version 1.1 for HTTP ++/// (Added in CURL 7.47.0) ++pub const CURL_HTTP_VERSION_2TLS: c_int = 4; ++/// Please use HTTP 2 without HTTP/1.1 Upgrade ++/// (Added in CURL 7.49.0) ++pub const CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE: c_int = 5; ++ ++// Note that the type here is wrong, it's just intended to just be an enum. ++pub const CURL_SSLVERSION_DEFAULT: CURLoption = 0; ++pub const CURL_SSLVERSION_TLSv1: CURLoption = 1; ++pub const CURL_SSLVERSION_SSLv2: CURLoption = 2; ++pub const CURL_SSLVERSION_SSLv3: CURLoption = 3; ++// pub const CURL_SSLVERSION_TLSv1_0: CURLoption = 4; ++// pub const CURL_SSLVERSION_TLSv1_1: CURLoption = 5; ++// pub const CURL_SSLVERSION_TLSv1_2: CURLoption = 6; ++ ++pub const CURLOPT_READDATA: CURLoption = CURLOPT_INFILE; ++pub const CURLOPT_WRITEDATA: CURLoption = CURLOPT_FILE; ++pub const CURLOPT_HEADERDATA: CURLoption = CURLOPT_WRITEHEADER; ++ ++pub type curl_TimeCond = __enum_ty; ++pub const CURL_TIMECOND_NONE: curl_TimeCond = 0; ++pub const CURL_TIMECOND_IFMODSINCE: curl_TimeCond = 1; ++pub const CURL_TIMECOND_IFUNMODSINCE: curl_TimeCond = 2; ++pub const CURL_TIMECOND_LASTMOD: curl_TimeCond = 3; ++ ++pub type CURLformoption = __enum_ty; ++pub const CURLFORM_NOTHING: CURLformoption = 0; ++pub const CURLFORM_COPYNAME: CURLformoption = 1; ++pub const CURLFORM_PTRNAME: CURLformoption = 2; ++pub const CURLFORM_NAMELENGTH: CURLformoption = 3; ++pub const CURLFORM_COPYCONTENTS: CURLformoption = 4; ++pub const CURLFORM_PTRCONTENTS: CURLformoption = 5; ++pub const CURLFORM_CONTENTSLENGTH: CURLformoption = 6; ++pub const CURLFORM_FILECONTENT: CURLformoption = 7; ++pub const CURLFORM_ARRAY: CURLformoption = 8; ++pub const CURLFORM_OBSOLETE: CURLformoption = 9; ++pub const CURLFORM_FILE: CURLformoption = 10; ++pub const CURLFORM_BUFFER: CURLformoption = 11; ++pub const CURLFORM_BUFFERPTR: CURLformoption = 12; ++pub const CURLFORM_BUFFERLENGTH: CURLformoption = 13; ++pub const CURLFORM_CONTENTTYPE: CURLformoption = 14; ++pub const CURLFORM_CONTENTHEADER: CURLformoption = 15; ++pub const CURLFORM_FILENAME: CURLformoption = 16; ++pub const CURLFORM_END: CURLformoption = 17; ++pub const CURLFORM_STREAM: CURLformoption = 19; ++ ++pub type CURLFORMcode = __enum_ty; ++pub const CURL_FORMADD_OK: CURLFORMcode = 0; ++pub const CURL_FORMADD_MEMORY: CURLFORMcode = 1; ++pub const CURL_FORMADD_OPTION_TWICE: CURLFORMcode = 2; ++pub const CURL_FORMADD_NULL: CURLFORMcode = 3; ++pub const 
CURL_FORMADD_UNKNOWN_OPTION: CURLFORMcode = 4; ++pub const CURL_FORMADD_INCOMPLETE: CURLFORMcode = 5; ++pub const CURL_FORMADD_ILLEGAL_ARRAY: CURLFORMcode = 6; ++pub const CURL_FORMADD_DISABLED: CURLFORMcode = 7; ++ ++#[repr(C)] ++pub struct curl_forms { ++ pub option: CURLformoption, ++ pub value: *const c_char, ++} ++ ++pub type curl_formget_callback = extern fn(*mut c_void, ++ *const c_char, ++ size_t) -> size_t; ++ ++#[repr(C)] ++pub struct curl_slist { ++ pub data: *mut c_char, ++ pub next: *mut curl_slist, ++} ++ ++#[repr(C)] ++pub struct curl_certinfo { ++ pub num_of_certs: c_int, ++ pub certinfo: *mut *mut curl_slist, ++} ++ ++// pub type curl_sslbackend = __enum_ty; ++// pub const CURLSSLBACKEND_NONE: curl_sslbackend = 0; ++// pub const CURLSSLBACKEND_OPENSSL: curl_sslbackend = 1; ++// pub const CURLSSLBACKEND_GNUTLS: curl_sslbackend = 2; ++// pub const CURLSSLBACKEND_NSS: curl_sslbackend = 3; ++// pub const CURLSSLBACKEND_QSOSSL: curl_sslbackend = 4; ++// pub const CURLSSLBACKEND_GSKIT: curl_sslbackend = 5; ++// pub const CURLSSLBACKEND_POLARSSL: curl_sslbackend = 6; ++// pub const CURLSSLBACKEND_CYASSL: curl_sslbackend = 7; ++// pub const CURLSSLBACKEND_SCHANNEL: curl_sslbackend = 8; ++// pub const CURLSSLBACKEND_DARWINSSL: curl_sslbackend = 9; ++ ++// #[repr(C)] ++// pub struct curl_tlssessioninfo { ++// pub backend: curl_sslbackend, ++// pub internals: *mut c_void, ++// } ++ ++pub const CURLINFO_STRING: CURLINFO = 0x100000; ++pub const CURLINFO_LONG: CURLINFO = 0x200000; ++pub const CURLINFO_DOUBLE: CURLINFO = 0x300000; ++pub const CURLINFO_SLIST: CURLINFO = 0x400000; ++pub const CURLINFO_MASK: CURLINFO = 0x0fffff; ++pub const CURLINFO_TYPEMASK: CURLINFO = 0xf00000; ++ ++pub const CURLINFO_EFFECTIVE_URL: CURLINFO = CURLINFO_STRING + 1; ++pub const CURLINFO_RESPONSE_CODE: CURLINFO = CURLINFO_LONG + 2; ++pub const CURLINFO_TOTAL_TIME: CURLINFO = CURLINFO_DOUBLE + 3; ++pub const CURLINFO_NAMELOOKUP_TIME: CURLINFO = CURLINFO_DOUBLE + 4; ++pub const CURLINFO_CONNECT_TIME: CURLINFO = CURLINFO_DOUBLE + 5; ++pub const CURLINFO_PRETRANSFER_TIME: CURLINFO = CURLINFO_DOUBLE + 6; ++pub const CURLINFO_SIZE_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 7; ++pub const CURLINFO_SIZE_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 8; ++pub const CURLINFO_SPEED_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 9; ++pub const CURLINFO_SPEED_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 10; ++pub const CURLINFO_HEADER_SIZE: CURLINFO = CURLINFO_LONG + 11; ++pub const CURLINFO_REQUEST_SIZE: CURLINFO = CURLINFO_LONG + 12; ++pub const CURLINFO_SSL_VERIFYRESULT: CURLINFO = CURLINFO_LONG + 13; ++pub const CURLINFO_FILETIME: CURLINFO = CURLINFO_LONG + 14; ++pub const CURLINFO_CONTENT_LENGTH_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 15; ++pub const CURLINFO_CONTENT_LENGTH_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 16; ++pub const CURLINFO_STARTTRANSFER_TIME: CURLINFO = CURLINFO_DOUBLE + 17; ++pub const CURLINFO_CONTENT_TYPE: CURLINFO = CURLINFO_STRING + 18; ++pub const CURLINFO_REDIRECT_TIME: CURLINFO = CURLINFO_DOUBLE + 19; ++pub const CURLINFO_REDIRECT_COUNT: CURLINFO = CURLINFO_LONG + 20; ++pub const CURLINFO_PRIVATE: CURLINFO = CURLINFO_STRING + 21; ++pub const CURLINFO_HTTP_CONNECTCODE: CURLINFO = CURLINFO_LONG + 22; ++pub const CURLINFO_HTTPAUTH_AVAIL: CURLINFO = CURLINFO_LONG + 23; ++pub const CURLINFO_PROXYAUTH_AVAIL: CURLINFO = CURLINFO_LONG + 24; ++pub const CURLINFO_OS_ERRNO: CURLINFO = CURLINFO_LONG + 25; ++pub const CURLINFO_NUM_CONNECTS: CURLINFO = CURLINFO_LONG + 26; ++pub const CURLINFO_SSL_ENGINES: CURLINFO = CURLINFO_SLIST + 27; 
++pub const CURLINFO_COOKIELIST: CURLINFO = CURLINFO_SLIST + 28; ++pub const CURLINFO_LASTSOCKET: CURLINFO = CURLINFO_LONG + 29; ++pub const CURLINFO_FTP_ENTRY_PATH: CURLINFO = CURLINFO_STRING + 30; ++pub const CURLINFO_REDIRECT_URL: CURLINFO = CURLINFO_STRING + 31; ++pub const CURLINFO_PRIMARY_IP: CURLINFO = CURLINFO_STRING + 32; ++pub const CURLINFO_APPCONNECT_TIME: CURLINFO = CURLINFO_DOUBLE + 33; ++pub const CURLINFO_CERTINFO: CURLINFO = CURLINFO_SLIST + 34; ++pub const CURLINFO_CONDITION_UNMET: CURLINFO = CURLINFO_LONG + 35; ++pub const CURLINFO_RTSP_SESSION_ID: CURLINFO = CURLINFO_STRING + 36; ++pub const CURLINFO_RTSP_CLIENT_CSEQ: CURLINFO = CURLINFO_LONG + 37; ++pub const CURLINFO_RTSP_SERVER_CSEQ: CURLINFO = CURLINFO_LONG + 38; ++pub const CURLINFO_RTSP_CSEQ_RECV: CURLINFO = CURLINFO_LONG + 39; ++pub const CURLINFO_PRIMARY_PORT: CURLINFO = CURLINFO_LONG + 40; ++pub const CURLINFO_LOCAL_IP: CURLINFO = CURLINFO_STRING + 41; ++pub const CURLINFO_LOCAL_PORT: CURLINFO = CURLINFO_LONG + 42; ++// pub const CURLINFO_TLS_SESSION: CURLINFO = CURLINFO_SLIST + 43; ++ ++pub type curl_closepolicy = __enum_ty; ++pub const CURLCLOSEPOLICY_NONE: curl_closepolicy = 0; ++pub const CURLCLOSEPOLICY_OLDEST: curl_closepolicy = 1; ++pub const CURLCLOSEPOLICY_LEAST_RECENTLY_USED: curl_closepolicy = 2; ++pub const CURLCLOSEPOLICY_LEAST_TRAFFIC: curl_closepolicy = 3; ++pub const CURLCLOSEPOLICY_SLOWEST: curl_closepolicy = 4; ++pub const CURLCLOSEPOLICY_CALLBACK: curl_closepolicy = 5; ++ ++pub const CURL_GLOBAL_SSL: c_long = 1 << 0; ++pub const CURL_GLOBAL_WIN32: c_long = 1 << 1; ++pub const CURL_GLOBAL_ALL: c_long = CURL_GLOBAL_SSL | CURL_GLOBAL_WIN32; ++pub const CURL_GLOBAL_NOTHING: c_long = 0; ++pub const CURL_GLOBAL_DEFAULT: c_long = CURL_GLOBAL_ALL; ++// pub const CURL_GLOBAL_ACK_EINTR: c_long = 1 << 2; ++ ++pub type curl_lock_data = __enum_ty; ++pub const CURL_LOCK_DATA_NONE: curl_lock_data = 0; ++pub const CURL_LOCK_DATA_SHARE: curl_lock_data = 1; ++pub const CURL_LOCK_DATA_COOKIE: curl_lock_data = 2; ++pub const CURL_LOCK_DATA_DNS: curl_lock_data = 3; ++pub const CURL_LOCK_DATA_SSL_SESSION: curl_lock_data = 4; ++pub const CURL_LOCK_DATA_CONNECT: curl_lock_data = 5; ++ ++pub type curl_lock_access = __enum_ty; ++pub const CURL_LOCK_ACCESS_NONE: curl_lock_access = 0; ++pub const CURL_LOCK_ACCESS_SHARED: curl_lock_access = 1; ++pub const CURL_LOCK_ACCESS_SINGLE: curl_lock_access = 2; ++ ++pub type curl_lock_function = extern fn(*mut CURL, ++ curl_lock_data, ++ curl_lock_access, ++ *mut c_void); ++pub type curl_unlock_function = extern fn(*mut CURL, ++ curl_lock_data, ++ *mut c_void); ++ ++pub enum CURLSH {} ++ ++pub type CURLSHcode = __enum_ty; ++pub const CURLSHE_OK: CURLSHcode = 0; ++pub const CURLSHE_BAD_OPTION: CURLSHcode = 1; ++pub const CURLSHE_IN_USE: CURLSHcode = 2; ++pub const CURLSHE_INVALID: CURLSHcode = 3; ++pub const CURLSHE_NOMEM: CURLSHcode = 4; ++// pub const CURLSHE_NOT_BUILT_IN: CURLSHcode = 5; ++ ++pub type CURLSHoption = __enum_ty; ++pub const CURLSHOPT_NONE: CURLSHoption = 0; ++pub const CURLSHOPT_SHARE: CURLSHoption = 1; ++pub const CURLSHOPT_UNSHARE: CURLSHoption = 2; ++pub const CURLSHOPT_LOCKFUNC: CURLSHoption = 3; ++pub const CURLSHOPT_UNLOCKFUNC: CURLSHoption = 4; ++pub const CURLSHOPT_USERDATA: CURLSHoption = 5; ++ ++pub const CURLVERSION_FIRST: CURLversion = 0; ++pub const CURLVERSION_SECOND: CURLversion = 1; ++pub const CURLVERSION_THIRD: CURLversion = 2; ++pub const CURLVERSION_FOURTH: CURLversion = 3; ++pub const CURLVERSION_FIFTH: CURLversion = 4; ++pub const 
CURLVERSION_NOW: CURLversion = CURLVERSION_FIFTH; ++ ++#[repr(C)] ++pub struct curl_version_info_data { ++ pub age: CURLversion, ++ pub version: *const c_char, ++ pub version_num: c_uint, ++ pub host: *const c_char, ++ pub features: c_int, ++ pub ssl_version: *const c_char, ++ pub ssl_version_num: c_long, ++ pub libz_version: *const c_char, ++ pub protocols: *const *const c_char, ++ pub ares: *const c_char, ++ pub ares_num: c_int, ++ pub libidn: *const c_char, ++ pub iconv_ver_num: c_int, ++ pub libssh_version: *const c_char, ++ pub brotli_ver_num: c_uint, ++ pub brotli_version: *const c_char, ++} ++ ++pub const CURL_VERSION_IPV6: c_int = 1 << 0; ++pub const CURL_VERSION_KERBEROS4: c_int = 1 << 1; ++pub const CURL_VERSION_SSL: c_int = 1 << 2; ++pub const CURL_VERSION_LIBZ: c_int = 1 << 3; ++pub const CURL_VERSION_NTLM: c_int = 1 << 4; ++pub const CURL_VERSION_GSSNEGOTIATE: c_int = 1 << 5; ++pub const CURL_VERSION_DEBUG: c_int = 1 << 6; ++pub const CURL_VERSION_ASYNCHDNS: c_int = 1 << 7; ++pub const CURL_VERSION_SPNEGO: c_int = 1 << 8; ++pub const CURL_VERSION_LARGEFILE: c_int = 1 << 9; ++pub const CURL_VERSION_IDN: c_int = 1 << 10; ++pub const CURL_VERSION_SSPI: c_int = 1 << 11; ++pub const CURL_VERSION_CONV: c_int = 1 << 12; ++pub const CURL_VERSION_CURLDEBUG: c_int = 1 << 13; ++pub const CURL_VERSION_TLSAUTH_SRP: c_int = 1 << 14; ++pub const CURL_VERSION_NTLM_WB: c_int = 1 << 15; ++pub const CURL_VERSION_HTTP2: c_int = 1 << 16; ++pub const CURL_VERSION_UNIX_SOCKETS: c_int = 1 << 19; ++ ++pub const CURLPAUSE_RECV: c_int = 1 << 0; ++pub const CURLPAUSE_RECV_CONT: c_int = 0; ++pub const CURLPAUSE_SEND: c_int = 1 << 2; ++pub const CURLPAUSE_SEND_CONT: c_int = 0; ++ ++pub enum CURLM {} ++ ++pub type CURLMcode = c_int; ++pub const CURLM_CALL_MULTI_PERFORM: CURLMcode = -1; ++pub const CURLM_OK: CURLMcode = 0; ++pub const CURLM_BAD_HANDLE: CURLMcode = 1; ++pub const CURLM_BAD_EASY_HANDLE: CURLMcode = 2; ++pub const CURLM_OUT_OF_MEMORY: CURLMcode = 3; ++pub const CURLM_INTERNAL_ERROR: CURLMcode = 4; ++pub const CURLM_BAD_SOCKET: CURLMcode = 5; ++pub const CURLM_UNKNOWN_OPTION: CURLMcode = 6; ++// pub const CURLM_ADDED_ALREADY: CURLMcode = 7; ++ ++pub type CURLMSG = __enum_ty; ++pub const CURLMSG_NONE: CURLMSG = 0; ++pub const CURLMSG_DONE: CURLMSG = 1; ++ ++#[repr(C)] ++pub struct CURLMsg { ++ pub msg: CURLMSG, ++ pub easy_handle: *mut CURL, ++ pub data: *mut c_void, ++} ++ ++pub const CURL_WAIT_POLLIN: c_short = 0x1; ++pub const CURL_WAIT_POLLPRI: c_short = 0x2; ++pub const CURL_WAIT_POLLOUT: c_short = 0x4; ++ ++#[repr(C)] ++pub struct curl_waitfd { ++ pub fd: curl_socket_t, ++ pub events: c_short, ++ pub revents: c_short, ++} ++ ++pub const CURL_POLL_NONE: c_int = 0; ++pub const CURL_POLL_IN: c_int = 1; ++pub const CURL_POLL_OUT: c_int = 2; ++pub const CURL_POLL_INOUT: c_int = 3; ++pub const CURL_POLL_REMOVE: c_int = 4; ++pub const CURL_CSELECT_IN: c_int = 1; ++pub const CURL_CSELECT_OUT: c_int = 2; ++pub const CURL_CSELECT_ERR: c_int = 4; ++pub const CURL_SOCKET_TIMEOUT: curl_socket_t = CURL_SOCKET_BAD; ++ ++pub type curl_socket_callback = extern fn(*mut CURL, ++ curl_socket_t, ++ c_int, ++ *mut c_void, ++ *mut c_void) -> c_int; ++pub type curl_multi_timer_callback = extern fn(*mut CURLM, ++ c_long, ++ *mut c_void) -> c_int; ++ ++pub type CURLMoption = __enum_ty; ++pub const CURLMOPT_SOCKETFUNCTION: CURLMoption = CURLOPTTYPE_FUNCTIONPOINT + 1; ++pub const CURLMOPT_SOCKETDATA: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 2; ++pub const CURLMOPT_PIPELINING: CURLMoption = CURLOPTTYPE_LONG + 3; ++pub 
const CURLMOPT_TIMERFUNCTION: CURLMoption = CURLOPTTYPE_FUNCTIONPOINT + 4; ++pub const CURLMOPT_TIMERDATA: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 5; ++// pub const CURLMOPT_MAXCONNECTS: CURLMoption = CURLOPTTYPE_LONG + 6; ++pub const CURLMOPT_MAX_HOST_CONNECTIONS: CURLMoption = CURLOPTTYPE_LONG + 7; ++pub const CURLMOPT_MAX_PIPELINE_LENGTH: CURLMoption = CURLOPTTYPE_LONG + 8; ++// pub const CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE: CURLMoption = CURLOPTTYPE_OFF_T + 9; ++// pub const CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE: CURLMoption = CURLOPTTYPE_OFF_T + 10; ++// pub const CURLMOPT_PIPELINING_SITE_BL: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 11; ++// pub const CURLMOPT_PIPELINING_SERVER_BL: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 12; ++// pub const CURLMOPT_MAX_TOTAL_CONNECTIONS: CURLMoption = CURLOPTTYPE_LONG + 13; ++ ++// These enums are for use with the CURLMOPT_PIPELINING option. ++pub const CURLPIPE_NOTHING: c_long = 0; ++pub const CURLPIPE_HTTP1: c_long = 1; ++pub const CURLPIPE_MULTIPLEX: c_long = 2; ++ ++pub const CURL_ERROR_SIZE: usize = 256; ++ ++pub type curl_opensocket_callback = extern fn(*mut c_void, ++ curlsocktype, ++ *mut curl_sockaddr) -> curl_socket_t; ++pub type curlsocktype = __enum_ty; ++pub const CURLSOCKTYPE_IPCXN: curlsocktype = 0; ++pub const CURLSOCKTYPE_ACCEPT: curlsocktype = 1; ++pub const CURLSOCKTYPE_LAST: curlsocktype = 2; ++ ++#[repr(C)] ++pub struct curl_sockaddr { ++ pub family: c_int, ++ pub socktype: c_int, ++ pub protocol: c_int, ++ pub addrlen: c_uint, ++ #[cfg(unix)] ++ pub addr: libc::sockaddr, ++ #[cfg(windows)] ++ pub addr: SOCKADDR, ++} ++ ++extern { ++ pub fn curl_formadd(httppost: *mut *mut curl_httppost, ++ last_post: *mut *mut curl_httppost, ++ ...) -> CURLFORMcode; ++ pub fn curl_formget(form: *mut curl_httppost, ++ arg: *mut c_void, ++ append: curl_formget_callback) -> c_int; ++ pub fn curl_formfree(form: *mut curl_httppost); ++ ++ pub fn curl_version() -> *mut c_char; ++ ++ pub fn curl_easy_escape(handle: *mut CURL, ++ string: *const c_char, ++ length: c_int) -> *mut c_char; ++ pub fn curl_easy_unescape(handle: *mut CURL, ++ string: *const c_char, ++ length: c_int, ++ outlength: *mut c_int) -> *mut c_char; ++ pub fn curl_free(p: *mut c_void); ++ ++ pub fn curl_global_init(flags: c_long) -> CURLcode; ++ pub fn curl_global_init_mem(flags: c_long, ++ m: curl_malloc_callback, ++ f: curl_free_callback, ++ r: curl_realloc_callback, ++ s: curl_strdup_callback, ++ c: curl_calloc_callback) -> CURLcode; ++ pub fn curl_global_cleanup(); ++ ++ pub fn curl_slist_append(list: *mut curl_slist, ++ val: *const c_char) -> *mut curl_slist; ++ pub fn curl_slist_free_all(list: *mut curl_slist); ++ ++ pub fn curl_getdate(p: *const c_char, _: *const time_t) -> time_t; ++ ++ pub fn curl_share_init() -> *mut CURLSH; ++ pub fn curl_share_setopt(sh: *mut CURLSH, ++ opt: CURLSHoption, ++ ...) -> CURLSHcode; ++ pub fn curl_share_cleanup(sh: *mut CURLSH) -> CURLSHcode; ++ ++ pub fn curl_version_info(t: CURLversion) -> *mut curl_version_info_data; ++ ++ pub fn curl_easy_strerror(code: CURLcode) -> *const c_char; ++ pub fn curl_share_strerror(code: CURLSHcode) -> *const c_char; ++ pub fn curl_easy_pause(handle: *mut CURL, bitmask: c_int) -> CURLcode; ++ ++ pub fn curl_easy_init() -> *mut CURL; ++ pub fn curl_easy_setopt(curl: *mut CURL, option: CURLoption, ...) -> CURLcode; ++ pub fn curl_easy_perform(curl: *mut CURL) -> CURLcode; ++ pub fn curl_easy_cleanup(curl: *mut CURL); ++ pub fn curl_easy_getinfo(curl: *mut CURL, info: CURLINFO, ...) 
-> CURLcode; ++ pub fn curl_easy_duphandle(curl: *mut CURL) -> *mut CURL; ++ pub fn curl_easy_reset(curl: *mut CURL); ++ pub fn curl_easy_recv(curl: *mut CURL, ++ buffer: *mut c_void, ++ buflen: size_t, ++ n: *mut size_t) -> CURLcode; ++ pub fn curl_easy_send(curl: *mut CURL, ++ buffer: *const c_void, ++ buflen: size_t, ++ n: *mut size_t) -> CURLcode; ++ ++ pub fn curl_multi_init() -> *mut CURLM; ++ pub fn curl_multi_add_handle(multi_handle: *mut CURLM, ++ curl_handle: *mut CURL) -> CURLMcode; ++ pub fn curl_multi_remove_handle(multi_handle: *mut CURLM, ++ curl_handle: *mut CURL) -> CURLMcode; ++ pub fn curl_multi_fdset(multi_handle: *mut CURLM, ++ read_fd_set: *mut fd_set, ++ write_fd_set: *mut fd_set, ++ exc_fd_set: *mut fd_set, ++ max_fd: *mut c_int) -> CURLMcode; ++ pub fn curl_multi_wait(multi_handle: *mut CURLM, ++ extra_fds: *mut curl_waitfd, ++ extra_nfds: c_uint, ++ timeout_ms: c_int, ++ ret: *mut c_int) -> CURLMcode; ++ pub fn curl_multi_perform(multi_handle: *mut CURLM, ++ running_handles: *mut c_int) -> CURLMcode; ++ pub fn curl_multi_cleanup(multi_handle: *mut CURLM) -> CURLMcode; ++ pub fn curl_multi_info_read(multi_handle: *mut CURLM, ++ msgs_in_queue: *mut c_int) -> *mut CURLMsg; ++ pub fn curl_multi_strerror(code: CURLMcode) -> *const c_char; ++ pub fn curl_multi_socket(multi_handle: *mut CURLM, ++ s: curl_socket_t, ++ running_handles: *mut c_int) -> CURLMcode; ++ pub fn curl_multi_socket_action(multi_handle: *mut CURLM, ++ s: curl_socket_t, ++ ev_bitmask: c_int, ++ running_handles: *mut c_int) -> CURLMcode; ++ pub fn curl_multi_socket_all(multi_handle: *mut CURLM, ++ running_handles: *mut c_int) -> CURLMcode; ++ pub fn curl_multi_timeout(multi_handle: *mut CURLM, ++ milliseconds: *mut c_long) -> CURLMcode; ++ pub fn curl_multi_setopt(multi_handle: *mut CURLM, ++ option: CURLMoption, ++ ...) -> CURLMcode; ++ pub fn curl_multi_assign(multi_handle: *mut CURLM, ++ sockfd: curl_socket_t, ++ sockp: *mut c_void) -> CURLMcode; ++} diff --cc vendor/env_logger-0.5.12/.cargo-checksum.json index 000000000,000000000..c02e50847 new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"f4d7e69c283751083d53d01eac767407343b8b69c4bd70058e08adc2637cb257"} diff --cc vendor/env_logger-0.5.12/.travis.yml index 000000000,000000000..f42848d05 new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/.travis.yml @@@ -1,0 -1,0 +1,20 @@@ ++language: rust ++sudo: false ++rust: ++ - 1.20.0 ++ - stable ++ - beta ++ - nightly ++before_script: ++ - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH ++script: ++ - cargo build --verbose ++ - cargo build --verbose --no-default-features ++ - cargo test --verbose ++ - cargo test --verbose --no-default-features ++after_success: ++ - travis-cargo --only nightly doc-upload ++ ++notifications: ++ email: ++on_success: never diff --cc vendor/env_logger-0.5.12/Cargo.toml index 000000000,000000000..97047515e new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/Cargo.toml @@@ -1,0 -1,0 +1,50 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. 
If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "env_logger" ++version = "0.5.12" ++authors = ["The Rust Project Developers"] ++description = "A logging implementation for `log` which is configured via an environment\nvariable.\n" ++documentation = "https://docs.rs/env_logger" ++readme = "README.md" ++keywords = ["logging", "log", "logger"] ++categories = ["development-tools::debugging"] ++license = "MIT/Apache-2.0" ++repository = "https://github.com/sebasmagri/env_logger/" ++ ++[[test]] ++name = "regexp_filter" ++harness = false ++ ++[[test]] ++name = "log-in-log" ++harness = false ++[dependencies.atty] ++version = "0.2" ++ ++[dependencies.humantime] ++version = "1.1" ++ ++[dependencies.log] ++version = "0.4" ++features = ["std"] ++ ++[dependencies.regex] ++version = "1" ++optional = true ++ ++[dependencies.termcolor] ++version = "1" ++ ++[features] ++default = ["regex"] diff --cc vendor/env_logger-0.5.12/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. 
++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. 
You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. 
In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. diff --cc vendor/env_logger-0.5.12/LICENSE-MIT index 000000000,000000000..39d4bdb5a new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/LICENSE-MIT @@@ -1,0 -1,0 +1,25 @@@ ++Copyright (c) 2014 The Rust Project Developers ++ ++Permission is hereby granted, free of charge, to any ++person obtaining a copy of this software and associated ++documentation files (the "Software"), to deal in the ++Software without restriction, including without ++limitation the rights to use, copy, modify, merge, ++publish, distribute, sublicense, and/or sell copies of ++the Software, and to permit persons to whom the Software ++is furnished to do so, subject to the following ++conditions: ++ ++The above copyright notice and this permission notice ++shall be included in all copies or substantial portions ++of the Software. 
++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED ++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A ++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT ++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR ++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++DEALINGS IN THE SOFTWARE. diff --cc vendor/env_logger-0.5.12/README.md index 000000000,000000000..e868842fb new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/README.md @@@ -1,0 -1,0 +1,140 @@@ ++env_logger [![Build Status](https://travis-ci.org/sebasmagri/env_logger.svg?branch=master)](https://travis-ci.org/sebasmagri/env_logger) [![Maintenance](https://img.shields.io/badge/maintenance-actively%20maintained-brightgreen.svg)](https://github.com/sebasmagri/env_logger) [![crates.io](https://img.shields.io/crates/v/env_logger.svg)](https://crates.io/crates/env_logger) [![Documentation](https://img.shields.io/badge/docs-current-blue.svg)](https://docs.rs/env_logger) ++========== ++ ++Implements a logger that can be configured via environment variables. ++ ++## Usage ++ ++### In libraries ++ ++`env_logger` makes sense when used in executables (binary projects). Libraries should use the [`log`](https://doc.rust-lang.org/log) crate instead. ++ ++### In executables ++ ++It must be added along with `log` to the project dependencies: ++ ++```toml ++[dependencies] ++log = "0.4.0" ++env_logger = "0.5.12" ++``` ++ ++`env_logger` must be initialized as early as possible in the project. After it's initialized, you can use the `log` macros to do actual logging. ++ ++```rust ++#[macro_use] ++extern crate log; ++extern crate env_logger; ++ ++fn main() { ++ env_logger::init(); ++ ++ info!("starting up"); ++ ++ // ... ++} ++``` ++ ++Then when running the executable, specify a value for the `RUST_LOG` ++environment variable that corresponds with the log messages you want to show. ++ ++```bash ++$ RUST_LOG=info ./main ++INFO: 2017-11-09T02:12:24Z: main: starting up ++``` ++ ++### In tests ++ ++Tests can use the `env_logger` crate to see log messages generated during that test: ++ ++```toml ++[dependencies] ++log = "0.4.0" ++ ++[dev-dependencies] ++env_logger = "0.5.12" ++``` ++ ++```rust ++#[macro_use] ++extern crate log; ++ ++fn add_one(num: i32) -> i32 { ++ info!("add_one called with {}", num); ++ num + 1 ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ extern crate env_logger; ++ ++ #[test] ++ fn it_adds_one() { ++ let _ = env_logger::try_init(); ++ info!("can log from the test too"); ++ assert_eq!(3, add_one(2)); ++ } ++ ++ #[test] ++ fn it_handles_negative_numbers() { ++ let _ = env_logger::try_init(); ++ info!("logging from another test"); ++ assert_eq!(-7, add_one(-8)); ++ } ++} ++``` ++ ++Assuming the module under test is called `my_lib`, running the tests with the ++`RUST_LOG` filtering to info messages from this module looks like: ++ ++```bash ++$ RUST_LOG=my_lib=info cargo test ++ Running target/debug/my_lib-... ++ ++running 2 tests ++INFO: 2017-11-09T02:12:24Z: my_lib::tests: logging from another test ++INFO: 2017-11-09T02:12:24Z: my_lib: add_one called with -8 ++test tests::it_handles_negative_numbers ... ok ++INFO: 2017-11-09T02:12:24Z: my_lib::tests: can log from the test too ++INFO: 2017-11-09T02:12:24Z: my_lib: add_one called with 2 ++test tests::it_adds_one ... ok ++ ++test result: ok. 
2 passed; 0 failed; 0 ignored; 0 measured ++``` ++ ++Note that `env_logger::try_init()` needs to be called in each test in which you ++want to enable logging. Additionally, the default behavior of tests to ++run in parallel means that logging output may be interleaved with test output. ++Either run tests in a single thread by specifying `RUST_TEST_THREADS=1` or by ++running one test by specifying its name as an argument to the test binaries as ++directed by the `cargo test` help docs: ++ ++```bash ++$ RUST_LOG=my_lib=info cargo test it_adds_one ++ Running target/debug/my_lib-... ++ ++running 1 test ++INFO: 2017-11-09T02:12:24Z: my_lib::tests: can log from the test too ++INFO: 2017-11-09T02:12:24Z: my_lib: add_one called with 2 ++test tests::it_adds_one ... ok ++ ++test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured ++``` ++ ++## Configuring log target ++ ++By default, `env_logger` logs to stderr. If you want to log to stdout instead, ++you can use the `Builder` to change the log target: ++ ++```rust ++use std::env; ++use env_logger::{Builder, Target}; ++ ++let mut builder = Builder::new(); ++builder.target(Target::Stdout); ++if env::var("RUST_LOG").is_ok() { ++ builder.parse(&env::var("RUST_LOG").unwrap()); ++} ++builder.init(); ++``` diff --cc vendor/env_logger-0.5.12/examples/custom_default_format.rs index 000000000,000000000..d1a45b608 new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/examples/custom_default_format.rs @@@ -1,0 -1,0 +1,44 @@@ ++/*! ++Disabling parts of the default format. ++ ++Before running this example, try setting the `MY_LOG_LEVEL` environment variable to `info`: ++ ++```no_run,shell ++$ export MY_LOG_LEVEL='info' ++``` ++ ++Also try setting the `MY_LOG_STYLE` environment variable to `never` to disable colors ++or `auto` to enable them: ++ ++```no_run,shell ++$ export MY_LOG_STYLE=never ++``` ++ ++If you want to control the logging output completely, see the `custom_logger` example. ++*/ ++ ++#[macro_use] ++extern crate log; ++extern crate env_logger; ++ ++use env_logger::{Env, Builder}; ++ ++fn init_logger() { ++ let env = Env::default() ++ .filter("MY_LOG_LEVEL") ++ .write_style("MY_LOG_STYLE"); ++ ++ let mut builder = Builder::from_env(env); ++ ++ builder ++ .default_format_level(false) ++ .default_format_timestamp_nanos(true); ++ ++ builder.init(); ++} ++ ++fn main() { ++ init_logger(); ++ ++ info!("a log from `MyLogger`"); ++} diff --cc vendor/env_logger-0.5.12/examples/custom_format.rs index 000000000,000000000..68a064d48 new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/examples/custom_format.rs @@@ -1,0 -1,0 +1,52 @@@ ++/*! ++Changing the default logging format. ++ ++Before running this example, try setting the `MY_LOG_LEVEL` environment variable to `info`: ++ ++```no_run,shell ++$ export MY_LOG_LEVEL='info' ++``` ++ ++Also try setting the `MY_LOG_STYLE` environment variable to `never` to disable colors ++or `auto` to enable them: ++ ++```no_run,shell ++$ export MY_LOG_STYLE=never ++``` ++ ++If you want to control the logging output completely, see the `custom_logger` example. 
++*/ ++ ++#[macro_use] ++extern crate log; ++extern crate env_logger; ++ ++use std::io::Write; ++ ++use env_logger::{Env, Builder, fmt}; ++ ++fn init_logger() { ++ let env = Env::default() ++ .filter("MY_LOG_LEVEL") ++ .write_style("MY_LOG_STYLE"); ++ ++ let mut builder = Builder::from_env(env); ++ ++ // Use a different format for writing log records ++ builder.format(|buf, record| { ++ let mut style = buf.style(); ++ style.set_bg(fmt::Color::Yellow).set_bold(true); ++ ++ let timestamp = buf.timestamp(); ++ ++ writeln!(buf, "My formatted log ({}): {}", timestamp, style.value(record.args())) ++ }); ++ ++ builder.init(); ++} ++ ++fn main() { ++ init_logger(); ++ ++ info!("a log from `MyLogger`"); ++} diff --cc vendor/env_logger-0.5.12/examples/custom_logger.rs index 000000000,000000000..792c9c8e5 new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/examples/custom_logger.rs @@@ -1,0 -1,0 +1,60 @@@ ++/*! ++Using `env_logger` to drive a custom logger. ++ ++Before running this example, try setting the `MY_LOG_LEVEL` environment variable to `info`: ++ ++```no_run,shell ++$ export MY_LOG_LEVEL='info' ++``` ++ ++If you only want to change the way logs are formatted, look at the `custom_format` example. ++*/ ++ ++#[macro_use] ++extern crate log; ++extern crate env_logger; ++use env_logger::filter::Filter; ++use log::{Log, Metadata, Record, SetLoggerError}; ++ ++struct MyLogger { ++ inner: Filter ++} ++ ++impl MyLogger { ++ fn new() -> MyLogger { ++ use env_logger::filter::Builder; ++ let mut builder = Builder::from_env("MY_LOG_LEVEL"); ++ ++ MyLogger { ++ inner: builder.build() ++ } ++ } ++ ++ fn init() -> Result<(), SetLoggerError> { ++ let logger = Self::new(); ++ ++ log::set_max_level(logger.inner.filter()); ++ log::set_boxed_logger(Box::new(logger)) ++ } ++} ++ ++impl Log for MyLogger { ++ fn enabled(&self, metadata: &Metadata) -> bool { ++ self.inner.enabled(metadata) ++ } ++ ++ fn log(&self, record: &Record) { ++ // Check if the record is matched by the logger before logging ++ if self.inner.matches(record) { ++ println!("{} - {}", record.level(), record.args()); ++ } ++ } ++ ++ fn flush(&self) { } ++} ++ ++fn main() { ++ MyLogger::init().unwrap(); ++ ++ info!("a log from `MyLogger`"); ++} diff --cc vendor/env_logger-0.5.12/examples/default.rs index 000000000,000000000..5d799fe05 new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/examples/default.rs @@@ -1,0 -1,0 +1,36 @@@ ++/*! ++Using `env_logger`. ++ ++Before running this example, try setting the `MY_LOG_LEVEL` environment variable to `info`: ++ ++```no_run,shell ++$ export MY_LOG_LEVEL='info' ++``` ++ ++Also try setting the `MY_LOG_STYLE` environment variable to `never` to disable colors ++or `auto` to enable them: ++ ++```no_run,shell ++$ export MY_LOG_STYLE=never ++``` ++*/ ++ ++#[macro_use] ++extern crate log; ++extern crate env_logger; ++ ++use env_logger::Env; ++ ++fn main() { ++ let env = Env::default() ++ .filter_or("MY_LOG_LEVEL", "trace") ++ .write_style_or("MY_LOG_STYLE", "always"); ++ ++ env_logger::init_from_env(env); ++ ++ trace!("some trace log"); ++ debug!("some debug log"); ++ info!("some information log"); ++ warn!("some warning log"); ++ error!("some error log"); ++} diff --cc vendor/env_logger-0.5.12/examples/direct_logger.rs index 000000000,000000000..410230bcd new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/examples/direct_logger.rs @@@ -1,0 -1,0 +1,40 @@@ ++/*! ++Using `env_logger::Logger` and the `log::Log` trait directly. 
++ ++This example doesn't rely on environment variables, or having a static logger installed. ++*/ ++ ++extern crate log; ++extern crate env_logger; ++ ++fn record() -> log::Record<'static> { ++ let error_metadata = log::MetadataBuilder::new() ++ .target("myApp") ++ .level(log::Level::Error) ++ .build(); ++ ++ log::Record::builder() ++ .metadata(error_metadata) ++ .args(format_args!("Error!")) ++ .line(Some(433)) ++ .file(Some("app.rs")) ++ .module_path(Some("server")) ++ .build() ++} ++ ++fn main() { ++ use log::Log; ++ ++ let stylish_logger = env_logger::Builder::new() ++ .filter(None, log::LevelFilter::Error) ++ .write_style(env_logger::WriteStyle::Always) ++ .build(); ++ ++ let unstylish_logger = env_logger::Builder::new() ++ .filter(None, log::LevelFilter::Error) ++ .write_style(env_logger::WriteStyle::Never) ++ .build(); ++ ++ stylish_logger.log(&record()); ++ unstylish_logger.log(&record()); ++} diff --cc vendor/env_logger-0.5.12/src/filter/mod.rs index 000000000,000000000..80a3f30fa new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/src/filter/mod.rs @@@ -1,0 -1,0 +1,568 @@@ ++//! Filtering for log records. ++//! ++//! This module contains the log filtering used by `env_logger` to match records. ++//! You can use the `Filter` type in your own logger implementation to use the same ++//! filter parsing and matching as `env_logger`. For more details about the format ++//! for directive strings see [Enabling Logging]. ++//! ++//! ## Using `env_logger` in your own logger ++//! ++//! You can use `env_logger`'s filtering functionality with your own logger. ++//! Call [`Builder::parse`] to parse directives from a string when constructing ++//! your logger. Call [`Filter::matches`] to check whether a record should be ++//! logged based on the parsed filters when log records are received. ++//! ++//! ``` ++//! extern crate log; ++//! extern crate env_logger; ++//! use env_logger::filter::Filter; ++//! use log::{Log, Metadata, Record}; ++//! ++//! struct MyLogger { ++//! filter: Filter ++//! } ++//! ++//! impl MyLogger { ++//! fn new() -> MyLogger { ++//! use env_logger::filter::Builder; ++//! let mut builder = Builder::new(); ++//! ++//! // Parse a directives string from an environment variable ++//! if let Ok(ref filter) = std::env::var("MY_LOG_LEVEL") { ++//! builder.parse(filter); ++//! } ++//! ++//! MyLogger { ++//! filter: builder.build() ++//! } ++//! } ++//! } ++//! ++//! impl Log for MyLogger { ++//! fn enabled(&self, metadata: &Metadata) -> bool { ++//! self.filter.enabled(metadata) ++//! } ++//! ++//! fn log(&self, record: &Record) { ++//! // Check if the record is matched by the filter ++//! if self.filter.matches(record) { ++//! println!("{:?}", record); ++//! } ++//! } ++//! ++//! fn flush(&self) {} ++//! } ++//! # fn main() {} ++//! ``` ++//! ++//! [Enabling Logging]: ../index.html#enabling-logging ++//! [`Builder::parse`]: struct.Builder.html#method.parse ++//! [`Filter::matches`]: struct.Filter.html#method.matches ++ ++use std::env; ++use std::mem; ++use std::fmt; ++use log::{Level, LevelFilter, Record, Metadata}; ++ ++#[cfg(feature = "regex")] ++#[path = "regex.rs"] ++mod inner; ++ ++#[cfg(not(feature = "regex"))] ++#[path = "string.rs"] ++mod inner; ++ ++/// A log filter. ++/// ++/// This struct can be used to determine whether or not a log record ++/// should be written to the output. ++/// Use the [`Builder`] type to parse and construct a `Filter`. 
++///
++/// [`Builder`]: struct.Builder.html
++pub struct Filter {
++    directives: Vec<Directive>,
++    filter: Option<inner::Filter>,
++}
++
++/// A builder for a log filter.
++///
++/// It can be used to parse a set of directives from a string before building
++/// a [`Filter`] instance.
++///
++/// ## Example
++///
++/// ```
++/// #[macro_use]
++/// extern crate log;
++/// extern crate env_logger;
++///
++/// use std::env;
++/// use std::io;
++/// use env_logger::filter::Builder;
++///
++/// fn main() {
++///     let mut builder = Builder::new();
++///
++///     // Parse a logging filter from an environment variable.
++///     if let Ok(rust_log) = env::var("RUST_LOG") {
++///         builder.parse(&rust_log);
++///     }
++///
++///     let filter = builder.build();
++/// }
++/// ```
++///
++/// [`Filter`]: struct.Filter.html
++pub struct Builder {
++    directives: Vec<Directive>,
++    filter: Option<inner::Filter>,
++}
++
++#[derive(Debug)]
++struct Directive {
++    name: Option<String>,
++    level: LevelFilter,
++}
++
++impl Filter {
++    /// Returns the maximum `LevelFilter` that this filter instance is
++    /// configured to output.
++    ///
++    /// # Example
++    ///
++    /// ```rust
++    /// extern crate log;
++    /// extern crate env_logger;
++    ///
++    /// use log::LevelFilter;
++    /// use env_logger::filter::Builder;
++    ///
++    /// fn main() {
++    ///     let mut builder = Builder::new();
++    ///     builder.filter(Some("module1"), LevelFilter::Info);
++    ///     builder.filter(Some("module2"), LevelFilter::Error);
++    ///
++    ///     let filter = builder.build();
++    ///     assert_eq!(filter.filter(), LevelFilter::Info);
++    /// }
++    /// ```
++    pub fn filter(&self) -> LevelFilter {
++        self.directives.iter()
++            .map(|d| d.level)
++            .max()
++            .unwrap_or(LevelFilter::Off)
++    }
++
++    /// Checks if this record matches the configured filter.
++    pub fn matches(&self, record: &Record) -> bool {
++        if !self.enabled(record.metadata()) {
++            return false;
++        }
++
++        if let Some(filter) = self.filter.as_ref() {
++            if !filter.is_match(&*record.args().to_string()) {
++                return false;
++            }
++        }
++
++        true
++    }
++
++    /// Determines if a log message with the specified metadata would be logged.
++    pub fn enabled(&self, metadata: &Metadata) -> bool {
++        let level = metadata.level();
++        let target = metadata.target();
++
++        enabled(&self.directives, level, target)
++    }
++}
++
++impl Builder {
++    /// Initializes the filter builder with defaults.
++    pub fn new() -> Builder {
++        Builder {
++            directives: Vec::new(),
++            filter: None,
++        }
++    }
++
++    /// Initializes the filter builder from an environment.
++    pub fn from_env(env: &str) -> Builder {
++        let mut builder = Builder::new();
++
++        if let Ok(s) = env::var(env) {
++            builder.parse(&s);
++        }
++
++        builder
++    }
++
++    /// Adds a directive to the filter for a specific module.
++    pub fn filter_module(&mut self, module: &str, level: LevelFilter) -> &mut Self {
++        self.filter(Some(module), level)
++    }
++
++    /// Adds a directive to the filter for all modules.
++    pub fn filter_level(&mut self, level: LevelFilter) -> &mut Self {
++        self.filter(None, level)
++    }
++
++    /// Adds a directive to the filter.
++    ///
++    /// The given module (if any) will log at most the specified level provided.
++    /// If no module is provided then the filter will apply to all log messages.
++    pub fn filter(&mut self,
++                  module: Option<&str>,
++                  level: LevelFilter) -> &mut Self {
++        self.directives.push(Directive {
++            name: module.map(|s| s.to_string()),
++            level,
++        });
++        self
++    }
++
++    /// Parses the directives string.
++    ///
++    /// See the [Enabling Logging] section for more details.
++    ///
++    /// [Enabling Logging]: ../index.html#enabling-logging
++    pub fn parse(&mut self, filters: &str) -> &mut Self {
++        let (directives, filter) = parse_spec(filters);
++
++        self.filter = filter;
++
++        for directive in directives {
++            self.directives.push(directive);
++        }
++        self
++    }
++
++    /// Build a log filter.
++    pub fn build(&mut self) -> Filter {
++        if self.directives.is_empty() {
++            // Adds the default filter if none exist
++            self.directives.push(Directive {
++                name: None,
++                level: LevelFilter::Error,
++            });
++        } else {
++            // Sort the directives by length of their name, this allows a
++            // little more efficient lookup at runtime.
++            self.directives.sort_by(|a, b| {
++                let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0);
++                let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0);
++                alen.cmp(&blen)
++            });
++        }
++
++        Filter {
++            directives: mem::replace(&mut self.directives, Vec::new()),
++            filter: mem::replace(&mut self.filter, None),
++        }
++    }
++}
++
++impl Default for Builder {
++    fn default() -> Self {
++        Builder::new()
++    }
++}
++
++impl fmt::Debug for Filter {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_struct("Filter")
++            .field("filter", &self.filter)
++            .field("directives", &self.directives)
++            .finish()
++    }
++}
++
++impl fmt::Debug for Builder {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_struct("Filter")
++            .field("filter", &self.filter)
++            .field("directives", &self.directives)
++            .finish()
++    }
++}
++
++/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo")
++/// and return a vector with log directives.
++fn parse_spec(spec: &str) -> (Vec<Directive>, Option<inner::Filter>) {
++    let mut dirs = Vec::new();
++
++    let mut parts = spec.split('/');
++    let mods = parts.next();
++    let filter = parts.next();
++    if parts.next().is_some() {
++        println!("warning: invalid logging spec '{}', \
++                 ignoring it (too many '/'s)", spec);
++        return (dirs, None);
++    }
++    mods.map(|m| { for s in m.split(',') {
++        if s.len() == 0 { continue }
++        let mut parts = s.split('=');
++        let (log_level, name) = match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) {
++            (Some(part0), None, None) => {
++                // if the single argument is a log-level string or number,
++                // treat that as a global fallback
++                match part0.parse() {
++                    Ok(num) => (num, None),
++                    Err(_) => (LevelFilter::max(), Some(part0)),
++                }
++            }
++            (Some(part0), Some(""), None) => (LevelFilter::max(), Some(part0)),
++            (Some(part0), Some(part1), None) => {
++                match part1.parse() {
++                    Ok(num) => (num, Some(part0)),
++                    _ => {
++                        println!("warning: invalid logging spec '{}', \
++                                 ignoring it", part1);
++                        continue
++                    }
++                }
++            },
++            _ => {
++                println!("warning: invalid logging spec '{}', \
++                         ignoring it", s);
++                continue
++            }
++        };
++        dirs.push(Directive {
++            name: name.map(|s| s.to_string()),
++            level: log_level,
++        });
++    }});
++
++    let filter = filter.map_or(None, |filter| {
++        match inner::Filter::new(filter) {
++            Ok(re) => Some(re),
++            Err(e) => {
++                println!("warning: invalid regex filter - {}", e);
++                None
++            }
++        }
++    });
++
++    return (dirs, filter);
++}
++
++
++// Check whether a level and target are enabled by the set of directives.
++fn enabled(directives: &[Directive], level: Level, target: &str) -> bool {
++    // Search for the longest match, the vector is assumed to be pre-sorted.
++    for directive in directives.iter().rev() {
++        match directive.name {
++            Some(ref name) if !target.starts_with(&**name) => {},
++            Some(..) | None => {
++                return level <= directive.level
++            }
++        }
++    }
++    false
++}
++
++#[cfg(test)]
++mod tests {
++    use log::{Level, LevelFilter};
++
++    use super::{Builder, Filter, Directive, parse_spec, enabled};
++
++    fn make_logger_filter(dirs: Vec<Directive>) -> Filter {
++        let mut logger = Builder::new().build();
++        logger.directives = dirs;
++        logger
++    }
++
++    #[test]
++    fn filter_info() {
++        let logger = Builder::new().filter(None, LevelFilter::Info).build();
++        assert!(enabled(&logger.directives, Level::Info, "crate1"));
++        assert!(!enabled(&logger.directives, Level::Debug, "crate1"));
++    }
++
++    #[test]
++    fn filter_beginning_longest_match() {
++        let logger = Builder::new()
++            .filter(Some("crate2"), LevelFilter::Info)
++            .filter(Some("crate2::mod"), LevelFilter::Debug)
++            .filter(Some("crate1::mod1"), LevelFilter::Warn)
++            .build();
++        assert!(enabled(&logger.directives, Level::Debug, "crate2::mod1"));
++        assert!(!enabled(&logger.directives, Level::Debug, "crate2"));
++    }
++
++    #[test]
++    fn parse_default() {
++        let logger = Builder::new().parse("info,crate1::mod1=warn").build();
++        assert!(enabled(&logger.directives, Level::Warn, "crate1::mod1"));
++        assert!(enabled(&logger.directives, Level::Info, "crate2::mod2"));
++    }
++
++    #[test]
++    fn match_full_path() {
++        let logger = make_logger_filter(vec![
++            Directive {
++                name: Some("crate2".to_string()),
++                level: LevelFilter::Info
++            },
++            Directive {
++                name: Some("crate1::mod1".to_string()),
++                level: LevelFilter::Warn
++            }
++        ]);
++        assert!(enabled(&logger.directives, Level::Warn, "crate1::mod1"));
++        assert!(!enabled(&logger.directives, Level::Info, "crate1::mod1"));
++        assert!(enabled(&logger.directives, Level::Info, "crate2"));
++        assert!(!enabled(&logger.directives, Level::Debug, "crate2"));
++    }
++
++    #[test]
++    fn no_match() {
++        let logger = make_logger_filter(vec![
++            Directive { name: Some("crate2".to_string()), level: LevelFilter::Info },
++            Directive { name: Some("crate1::mod1".to_string()), level: LevelFilter::Warn }
++        ]);
++        assert!(!enabled(&logger.directives, Level::Warn, "crate3"));
++    }
++
++    #[test]
++    fn match_beginning() {
++        let logger = make_logger_filter(vec![
++            Directive { name: Some("crate2".to_string()), level: LevelFilter::Info },
++            Directive { name: Some("crate1::mod1".to_string()), level: LevelFilter::Warn }
++        ]);
++        assert!(enabled(&logger.directives, Level::Info, "crate2::mod1"));
++    }
++
++    #[test]
++    fn match_beginning_longest_match() {
++        let logger = make_logger_filter(vec![
++            Directive { name: Some("crate2".to_string()), level: LevelFilter::Info },
++            Directive { name: Some("crate2::mod".to_string()), level: LevelFilter::Debug },
++            Directive { name: Some("crate1::mod1".to_string()), level: LevelFilter::Warn }
++        ]);
++        assert!(enabled(&logger.directives, Level::Debug, "crate2::mod1"));
++        assert!(!enabled(&logger.directives, Level::Debug, "crate2"));
++    }
++
++    #[test]
++    fn match_default() {
++        let logger = make_logger_filter(vec![
++            Directive { name: None, level: LevelFilter::Info },
++            Directive { name: Some("crate1::mod1".to_string()), level: LevelFilter::Warn }
++        ]);
++        assert!(enabled(&logger.directives, Level::Warn, "crate1::mod1"));
++        assert!(enabled(&logger.directives, Level::Info, "crate2::mod2"));
++    }
++
++    #[test]
++    fn zero_level() {
++        let logger = make_logger_filter(vec![
++            Directive { name: None, level: LevelFilter::Info },
++            Directive { name: Some("crate1::mod1".to_string()), level: LevelFilter::Off }
++        ]);
++        assert!(!enabled(&logger.directives, Level::Error, "crate1::mod1"));
++ assert!(enabled(&logger.directives, Level::Info, "crate2::mod2")); ++ } ++ ++ #[test] ++ fn parse_spec_valid() { ++ let (dirs, filter) = parse_spec("crate1::mod1=error,crate1::mod2,crate2=debug"); ++ assert_eq!(dirs.len(), 3); ++ assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); ++ assert_eq!(dirs[0].level, LevelFilter::Error); ++ ++ assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); ++ assert_eq!(dirs[1].level, LevelFilter::max()); ++ ++ assert_eq!(dirs[2].name, Some("crate2".to_string())); ++ assert_eq!(dirs[2].level, LevelFilter::Debug); ++ assert!(filter.is_none()); ++ } ++ ++ #[test] ++ fn parse_spec_invalid_crate() { ++ // test parse_spec with multiple = in specification ++ let (dirs, filter) = parse_spec("crate1::mod1=warn=info,crate2=debug"); ++ assert_eq!(dirs.len(), 1); ++ assert_eq!(dirs[0].name, Some("crate2".to_string())); ++ assert_eq!(dirs[0].level, LevelFilter::Debug); ++ assert!(filter.is_none()); ++ } ++ ++ #[test] ++ fn parse_spec_invalid_level() { ++ // test parse_spec with 'noNumber' as log level ++ let (dirs, filter) = parse_spec("crate1::mod1=noNumber,crate2=debug"); ++ assert_eq!(dirs.len(), 1); ++ assert_eq!(dirs[0].name, Some("crate2".to_string())); ++ assert_eq!(dirs[0].level, LevelFilter::Debug); ++ assert!(filter.is_none()); ++ } ++ ++ #[test] ++ fn parse_spec_string_level() { ++ // test parse_spec with 'warn' as log level ++ let (dirs, filter) = parse_spec("crate1::mod1=wrong,crate2=warn"); ++ assert_eq!(dirs.len(), 1); ++ assert_eq!(dirs[0].name, Some("crate2".to_string())); ++ assert_eq!(dirs[0].level, LevelFilter::Warn); ++ assert!(filter.is_none()); ++ } ++ ++ #[test] ++ fn parse_spec_empty_level() { ++ // test parse_spec with '' as log level ++ let (dirs, filter) = parse_spec("crate1::mod1=wrong,crate2="); ++ assert_eq!(dirs.len(), 1); ++ assert_eq!(dirs[0].name, Some("crate2".to_string())); ++ assert_eq!(dirs[0].level, LevelFilter::max()); ++ assert!(filter.is_none()); ++ } ++ ++ #[test] ++ fn parse_spec_global() { ++ // test parse_spec with no crate ++ let (dirs, filter) = parse_spec("warn,crate2=debug"); ++ assert_eq!(dirs.len(), 2); ++ assert_eq!(dirs[0].name, None); ++ assert_eq!(dirs[0].level, LevelFilter::Warn); ++ assert_eq!(dirs[1].name, Some("crate2".to_string())); ++ assert_eq!(dirs[1].level, LevelFilter::Debug); ++ assert!(filter.is_none()); ++ } ++ ++ #[test] ++ fn parse_spec_valid_filter() { ++ let (dirs, filter) = parse_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc"); ++ assert_eq!(dirs.len(), 3); ++ assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); ++ assert_eq!(dirs[0].level, LevelFilter::Error); ++ ++ assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); ++ assert_eq!(dirs[1].level, LevelFilter::max()); ++ ++ assert_eq!(dirs[2].name, Some("crate2".to_string())); ++ assert_eq!(dirs[2].level, LevelFilter::Debug); ++ assert!(filter.is_some() && filter.unwrap().to_string() == "abc"); ++ } ++ ++ #[test] ++ fn parse_spec_invalid_crate_filter() { ++ let (dirs, filter) = parse_spec("crate1::mod1=error=warn,crate2=debug/a.c"); ++ assert_eq!(dirs.len(), 1); ++ assert_eq!(dirs[0].name, Some("crate2".to_string())); ++ assert_eq!(dirs[0].level, LevelFilter::Debug); ++ assert!(filter.is_some() && filter.unwrap().to_string() == "a.c"); ++ } ++ ++ #[test] ++ fn parse_spec_empty_with_filter() { ++ let (dirs, filter) = parse_spec("crate1/a*c"); ++ assert_eq!(dirs.len(), 1); ++ assert_eq!(dirs[0].name, Some("crate1".to_string())); ++ assert_eq!(dirs[0].level, LevelFilter::max()); ++ assert!(filter.is_some() && 
filter.unwrap().to_string() == "a*c");
++    }
++}
diff --cc vendor/env_logger-0.5.12/src/filter/regex.rs
index 000000000,000000000..a04265413
new file mode 100644
--- /dev/null
+++ b/vendor/env_logger-0.5.12/src/filter/regex.rs
@@@ -1,0 -1,0 +1,29 @@@
++extern crate regex;
++
++use std::fmt;
++
++use self::regex::Regex;
++
++#[derive(Debug)]
++pub struct Filter {
++    inner: Regex,
++}
++
++impl Filter {
++    pub fn new(spec: &str) -> Result<Filter, String> {
++        match Regex::new(spec) {
++            Ok(r) => Ok(Filter { inner: r }),
++            Err(e) => Err(e.to_string()),
++        }
++    }
++
++    pub fn is_match(&self, s: &str) -> bool {
++        self.inner.is_match(s)
++    }
++}
++
++impl fmt::Display for Filter {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        self.inner.fmt(f)
++    }
++}
diff --cc vendor/env_logger-0.5.12/src/filter/string.rs
index 000000000,000000000..96d7ecca1
new file mode 100644
--- /dev/null
+++ b/vendor/env_logger-0.5.12/src/filter/string.rs
@@@ -1,0 -1,0 +1,22 @@@
++use std::fmt;
++
++#[derive(Debug)]
++pub struct Filter {
++    inner: String,
++}
++
++impl Filter {
++    pub fn new(spec: &str) -> Result<Filter, String> {
++        Ok(Filter { inner: spec.to_string() })
++    }
++
++    pub fn is_match(&self, s: &str) -> bool {
++        s.contains(&self.inner)
++    }
++}
++
++impl fmt::Display for Filter {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        self.inner.fmt(f)
++    }
++}
diff --cc vendor/env_logger-0.5.12/src/fmt.rs
index 000000000,000000000..465bbecf6
new file mode 100644
--- /dev/null
+++ b/vendor/env_logger-0.5.12/src/fmt.rs
@@@ -1,0 -1,0 +1,830 @@@
++//! Formatting for log records.
++//!
++//! This module contains a [`Formatter`] that log records can be formatted
++//! into without needing temporary allocations. Usually you won't need to worry
++//! about the contents of this module and can use the `Formatter` like an ordinary
++//! [`Write`].
++//!
++//! # Formatting log records
++//!
++//! The format used to print log records can be customised using the [`Builder::format`]
++//! method.
++//! Custom formats can apply different color and weight to printed values using
++//! [`Style`] builders.
++//!
++//! ```
++//! use std::io::Write;
++//! use env_logger::fmt::Color;
++//!
++//! let mut builder = env_logger::Builder::new();
++//!
++//! builder.format(|buf, record| {
++//!     let mut level_style = buf.style();
++//!
++//!     level_style.set_color(Color::Red).set_bold(true);
++//!
++//!     writeln!(buf, "{}: {}",
++//!         level_style.value(record.level()),
++//!         record.args())
++//! });
++//! ```
++//!
++//! [`Formatter`]: struct.Formatter.html
++//! [`Style`]: struct.Style.html
++//! [`Builder::format`]: ../struct.Builder.html#method.format
++//! [`Write`]: https://doc.rust-lang.org/stable/std/io/trait.Write.html
++
++use std::io::prelude::*;
++use std::{io, fmt};
++use std::rc::Rc;
++use std::str::FromStr;
++use std::error::Error;
++use std::cell::RefCell;
++use std::time::SystemTime;
++
++use termcolor::{self, ColorSpec, ColorChoice, Buffer, BufferWriter, WriteColor};
++use atty;
++use humantime::{format_rfc3339_seconds, format_rfc3339_nanos};
++
++/// A formatter to write logs into.
++///
++/// `Formatter` implements the standard [`Write`] trait for writing log records.
++/// It also supports terminal colors, through the [`style`] method.
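++/// Internally, output is buffered and only handed to the terminal once the
++/// whole record has been formatted.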
++/// ++/// # Examples ++/// ++/// Use the [`writeln`] macro to easily format a log record: ++/// ++/// ``` ++/// use std::io::Write; ++/// ++/// let mut builder = env_logger::Builder::new(); ++/// ++/// builder.format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args())); ++/// ``` ++/// ++/// [`Write`]: https://doc.rust-lang.org/stable/std/io/trait.Write.html ++/// [`writeln`]: https://doc.rust-lang.org/stable/std/macro.writeln.html ++/// [`style`]: #method.style ++pub struct Formatter { ++ buf: Rc>, ++ write_style: WriteStyle, ++} ++ ++/// A set of styles to apply to the terminal output. ++/// ++/// Call [`Formatter::style`] to get a `Style` and use the builder methods to ++/// set styling properties, like [color] and [weight]. ++/// To print a value using the style, wrap it in a call to [`value`] when the log ++/// record is formatted. ++/// ++/// # Examples ++/// ++/// Create a bold, red colored style and use it to print the log level: ++/// ++/// ``` ++/// use std::io::Write; ++/// use env_logger::fmt::Color; ++/// ++/// let mut builder = env_logger::Builder::new(); ++/// ++/// builder.format(|buf, record| { ++/// let mut level_style = buf.style(); ++/// ++/// level_style.set_color(Color::Red).set_bold(true); ++/// ++/// writeln!(buf, "{}: {}", ++/// level_style.value(record.level()), ++/// record.args()) ++/// }); ++/// ``` ++/// ++/// Styles can be re-used to output multiple values: ++/// ++/// ``` ++/// use std::io::Write; ++/// use env_logger::fmt::Color; ++/// ++/// let mut builder = env_logger::Builder::new(); ++/// ++/// builder.format(|buf, record| { ++/// let mut bold = buf.style(); ++/// ++/// bold.set_bold(true); ++/// ++/// writeln!(buf, "{}: {} {}", ++/// bold.value(record.level()), ++/// bold.value("some bold text"), ++/// record.args()) ++/// }); ++/// ``` ++/// ++/// [`Formatter::style`]: struct.Formatter.html#method.style ++/// [color]: #method.set_color ++/// [weight]: #method.set_bold ++/// [`value`]: #method.value ++#[derive(Clone)] ++pub struct Style { ++ buf: Rc>, ++ spec: ColorSpec, ++} ++ ++/// A value that can be printed using the given styles. ++/// ++/// It is the result of calling [`Style::value`]. ++/// ++/// [`Style::value`]: struct.Style.html#method.value ++pub struct StyledValue<'a, T> { ++ style: &'a Style, ++ value: T, ++} ++ ++/// An [RFC3339] formatted timestamp. ++/// ++/// The timestamp implements [`Display`] and can be written to a [`Formatter`]. ++/// ++/// [RFC3339]: https://www.ietf.org/rfc/rfc3339.txt ++/// [`Display`]: https://doc.rust-lang.org/stable/std/fmt/trait.Display.html ++/// [`Formatter`]: struct.Formatter.html ++pub struct Timestamp(SystemTime); ++ ++/// An [RFC3339] formatted timestamp with nanos ++#[derive(Debug)] ++pub struct PreciseTimestamp(SystemTime); ++ ++/// Log target, either `stdout` or `stderr`. ++#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] ++pub enum Target { ++ /// Logs will be sent to standard output. ++ Stdout, ++ /// Logs will be sent to standard error. ++ Stderr, ++} ++ ++impl Default for Target { ++ fn default() -> Self { ++ Target::Stderr ++ } ++} ++ ++/// Whether or not to print styles to the target. ++#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] ++pub enum WriteStyle { ++ /// Try to print styles, but don't force the issue. ++ Auto, ++ /// Try very hard to print styles. ++ Always, ++ /// Never print styles. ++ Never, ++} ++ ++impl Default for WriteStyle { ++ fn default() -> Self { ++ WriteStyle::Auto ++ } ++} ++ ++/// A terminal target with color awareness. 
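++/// Wraps a `termcolor` `BufferWriter` together with the `WriteStyle` it was
++/// configured with.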
++pub(crate) struct Writer { ++ inner: BufferWriter, ++ write_style: WriteStyle, ++} ++ ++impl Writer { ++ pub(crate) fn write_style(&self) -> WriteStyle { ++ self.write_style ++ } ++} ++ ++/// A builder for a terminal writer. ++/// ++/// The target and style choice can be configured before building. ++pub(crate) struct Builder { ++ target: Target, ++ write_style: WriteStyle, ++} ++ ++impl Builder { ++ /// Initialize the writer builder with defaults. ++ pub fn new() -> Self { ++ Builder { ++ target: Default::default(), ++ write_style: Default::default(), ++ } ++ } ++ ++ /// Set the target to write to. ++ pub fn target(&mut self, target: Target) -> &mut Self { ++ self.target = target; ++ self ++ } ++ ++ /// Parses a style choice string. ++ /// ++ /// See the [Disabling colors] section for more details. ++ /// ++ /// [Disabling colors]: ../index.html#disabling-colors ++ pub fn parse(&mut self, write_style: &str) -> &mut Self { ++ self.write_style(parse_write_style(write_style)) ++ } ++ ++ /// Whether or not to print style characters when writing. ++ pub fn write_style(&mut self, write_style: WriteStyle) -> &mut Self { ++ self.write_style = write_style; ++ self ++ } ++ ++ /// Build a terminal writer. ++ pub fn build(&mut self) -> Writer { ++ let color_choice = match self.write_style { ++ WriteStyle::Auto => { ++ if atty::is(match self.target { ++ Target::Stderr => atty::Stream::Stderr, ++ Target::Stdout => atty::Stream::Stdout, ++ }) { ++ ColorChoice::Auto ++ } else { ++ ColorChoice::Never ++ } ++ }, ++ WriteStyle::Always => ColorChoice::Always, ++ WriteStyle::Never => ColorChoice::Never, ++ }; ++ ++ let writer = match self.target { ++ Target::Stderr => BufferWriter::stderr(color_choice), ++ Target::Stdout => BufferWriter::stdout(color_choice), ++ }; ++ ++ Writer { ++ inner: writer, ++ write_style: self.write_style, ++ } ++ } ++} ++ ++impl Default for Builder { ++ fn default() -> Self { ++ Builder::new() ++ } ++} ++ ++impl Style { ++ /// Set the text color. ++ /// ++ /// # Examples ++ /// ++ /// Create a style with red text: ++ /// ++ /// ``` ++ /// use std::io::Write; ++ /// use env_logger::fmt::Color; ++ /// ++ /// let mut builder = env_logger::Builder::new(); ++ /// ++ /// builder.format(|buf, record| { ++ /// let mut style = buf.style(); ++ /// ++ /// style.set_color(Color::Red); ++ /// ++ /// writeln!(buf, "{}", style.value(record.args())) ++ /// }); ++ /// ``` ++ pub fn set_color(&mut self, color: Color) -> &mut Style { ++ self.spec.set_fg(color.into_termcolor()); ++ self ++ } ++ ++ /// Set the text weight. ++ /// ++ /// If `yes` is true then text will be written in bold. ++ /// If `yes` is false then text will be written in the default weight. ++ /// ++ /// # Examples ++ /// ++ /// Create a style with bold text: ++ /// ++ /// ``` ++ /// use std::io::Write; ++ /// ++ /// let mut builder = env_logger::Builder::new(); ++ /// ++ /// builder.format(|buf, record| { ++ /// let mut style = buf.style(); ++ /// ++ /// style.set_bold(true); ++ /// ++ /// writeln!(buf, "{}", style.value(record.args())) ++ /// }); ++ /// ``` ++ pub fn set_bold(&mut self, yes: bool) -> &mut Style { ++ self.spec.set_bold(yes); ++ self ++ } ++ ++ /// Set the text intensity. ++ /// ++ /// If `yes` is true then text will be written in a brighter color. ++ /// If `yes` is false then text will be written in the default color. 
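++    /// Whether intense text is rendered as a brighter color or a bolder face
++    /// is ultimately up to the terminal.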
++ /// ++ /// # Examples ++ /// ++ /// Create a style with intense text: ++ /// ++ /// ``` ++ /// use std::io::Write; ++ /// ++ /// let mut builder = env_logger::Builder::new(); ++ /// ++ /// builder.format(|buf, record| { ++ /// let mut style = buf.style(); ++ /// ++ /// style.set_intense(true); ++ /// ++ /// writeln!(buf, "{}", style.value(record.args())) ++ /// }); ++ /// ``` ++ pub fn set_intense(&mut self, yes: bool) -> &mut Style { ++ self.spec.set_intense(yes); ++ self ++ } ++ ++ /// Set the background color. ++ /// ++ /// # Examples ++ /// ++ /// Create a style with a yellow background: ++ /// ++ /// ``` ++ /// use std::io::Write; ++ /// use env_logger::fmt::Color; ++ /// ++ /// let mut builder = env_logger::Builder::new(); ++ /// ++ /// builder.format(|buf, record| { ++ /// let mut style = buf.style(); ++ /// ++ /// style.set_bg(Color::Yellow); ++ /// ++ /// writeln!(buf, "{}", style.value(record.args())) ++ /// }); ++ /// ``` ++ pub fn set_bg(&mut self, color: Color) -> &mut Style { ++ self.spec.set_bg(color.into_termcolor()); ++ self ++ } ++ ++ /// Wrap a value in the style. ++ /// ++ /// The same `Style` can be used to print multiple different values. ++ /// ++ /// # Examples ++ /// ++ /// Create a bold, red colored style and use it to print the log level: ++ /// ++ /// ``` ++ /// use std::io::Write; ++ /// use env_logger::fmt::Color; ++ /// ++ /// let mut builder = env_logger::Builder::new(); ++ /// ++ /// builder.format(|buf, record| { ++ /// let mut style = buf.style(); ++ /// ++ /// style.set_color(Color::Red).set_bold(true); ++ /// ++ /// writeln!(buf, "{}: {}", ++ /// style.value(record.level()), ++ /// record.args()) ++ /// }); ++ /// ``` ++ pub fn value(&self, value: T) -> StyledValue { ++ StyledValue { ++ style: &self, ++ value ++ } ++ } ++} ++ ++impl Formatter { ++ pub(crate) fn new(writer: &Writer) -> Self { ++ Formatter { ++ buf: Rc::new(RefCell::new(writer.inner.buffer())), ++ write_style: writer.write_style(), ++ } ++ } ++ ++ pub(crate) fn write_style(&self) -> WriteStyle { ++ self.write_style ++ } ++ ++ /// Begin a new [`Style`]. ++ /// ++ /// # Examples ++ /// ++ /// Create a bold, red colored style and use it to print the log level: ++ /// ++ /// ``` ++ /// use std::io::Write; ++ /// use env_logger::fmt::Color; ++ /// ++ /// let mut builder = env_logger::Builder::new(); ++ /// ++ /// builder.format(|buf, record| { ++ /// let mut level_style = buf.style(); ++ /// ++ /// level_style.set_color(Color::Red).set_bold(true); ++ /// ++ /// writeln!(buf, "{}: {}", ++ /// level_style.value(record.level()), ++ /// record.args()) ++ /// }); ++ /// ``` ++ /// ++ /// [`Style`]: struct.Style.html ++ pub fn style(&self) -> Style { ++ Style { ++ buf: self.buf.clone(), ++ spec: ColorSpec::new(), ++ } ++ } ++ ++ /// Get a [`Timestamp`] for the current date and time in UTC. ++ /// ++ /// # Examples ++ /// ++ /// Include the current timestamp with the log record: ++ /// ++ /// ``` ++ /// use std::io::Write; ++ /// ++ /// let mut builder = env_logger::Builder::new(); ++ /// ++ /// builder.format(|buf, record| { ++ /// let ts = buf.timestamp(); ++ /// ++ /// writeln!(buf, "{}: {}: {}", ts, record.level(), record.args()) ++ /// }); ++ /// ``` ++ /// ++ /// [`Timestamp`]: struct.Timestamp.html ++ pub fn timestamp(&self) -> Timestamp { ++ Timestamp(SystemTime::now()) ++ } ++ ++ /// Get a [`PreciseTimestamp`] for the current date and time in UTC with nanos. 
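++    ///
++    /// # Examples
++    ///
++    /// Include a nanosecond-precision timestamp with the log record (a minimal
++    /// sketch mirroring the [`timestamp`] example above):
++    ///
++    /// ```
++    /// use std::io::Write;
++    ///
++    /// let mut builder = env_logger::Builder::new();
++    ///
++    /// builder.format(|buf, record| {
++    ///     let ts = buf.precise_timestamp();
++    ///
++    ///     writeln!(buf, "{}: {}: {}", ts, record.level(), record.args())
++    /// });
++    /// ```
++    ///
++    /// [`timestamp`]: #method.timestamp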
++ pub fn precise_timestamp(&self) -> PreciseTimestamp { ++ PreciseTimestamp(SystemTime::now()) ++ } ++ ++ pub(crate) fn print(&self, writer: &Writer) -> io::Result<()> { ++ writer.inner.print(&self.buf.borrow()) ++ } ++ ++ pub(crate) fn clear(&mut self) { ++ self.buf.borrow_mut().clear() ++ } ++} ++ ++impl Write for Formatter { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.buf.borrow_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.buf.borrow_mut().flush() ++ } ++} ++ ++impl<'a, T> StyledValue<'a, T> { ++ fn write_fmt(&self, f: F) -> fmt::Result ++ where ++ F: FnOnce() -> fmt::Result, ++ { ++ self.style.buf.borrow_mut().set_color(&self.style.spec).map_err(|_| fmt::Error)?; ++ ++ // Always try to reset the terminal style, even if writing failed ++ let write = f(); ++ let reset = self.style.buf.borrow_mut().reset().map_err(|_| fmt::Error); ++ ++ write.and(reset) ++ } ++} ++ ++impl fmt::Debug for Timestamp { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ /// A `Debug` wrapper for `Timestamp` that uses the `Display` implementation. ++ struct TimestampValue<'a>(&'a Timestamp); ++ ++ impl<'a> fmt::Debug for TimestampValue<'a> { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ fmt::Display::fmt(&self.0, f) ++ } ++ } ++ ++ f.debug_tuple("Timestamp") ++ .field(&TimestampValue(&self)) ++ .finish() ++ } ++} ++ ++impl fmt::Debug for Writer { ++ fn fmt(&self, f: &mut fmt::Formatter)->fmt::Result { ++ f.debug_struct("Writer").finish() ++ } ++} ++ ++impl fmt::Debug for Formatter { ++ fn fmt(&self, f: &mut fmt::Formatter)->fmt::Result { ++ f.debug_struct("Formatter").finish() ++ } ++} ++ ++impl fmt::Debug for Builder { ++ fn fmt(&self, f: &mut fmt::Formatter)->fmt::Result { ++ f.debug_struct("Logger") ++ .field("target", &self.target) ++ .field("write_style", &self.write_style) ++ .finish() ++ } ++} ++ ++impl fmt::Debug for Style { ++ fn fmt(&self, f: &mut fmt::Formatter)->fmt::Result { ++ f.debug_struct("Style").field("spec", &self.spec).finish() ++ } ++} ++ ++macro_rules! impl_styled_value_fmt { ++ ($($fmt_trait:path),*) => { ++ $( ++ impl<'a, T: $fmt_trait> $fmt_trait for StyledValue<'a, T> { ++ fn fmt(&self, f: &mut fmt::Formatter)->fmt::Result { ++ self.write_fmt(|| T::fmt(&self.value, f)) ++ } ++ } ++ )* ++ }; ++} ++ ++impl_styled_value_fmt!( ++ fmt::Debug, ++ fmt::Display, ++ fmt::Pointer, ++ fmt::Octal, ++ fmt::Binary, ++ fmt::UpperHex, ++ fmt::LowerHex, ++ fmt::UpperExp, ++ fmt::LowerExp); ++ ++impl fmt::Display for Timestamp { ++ fn fmt(&self, f: &mut fmt::Formatter)->fmt::Result { ++ format_rfc3339_seconds(self.0).fmt(f) ++ } ++} ++ ++impl fmt::Display for PreciseTimestamp { ++ fn fmt(&self, f: &mut fmt::Formatter)->fmt::Result { ++ format_rfc3339_nanos(self.0).fmt(f) ++ } ++} ++ ++// The `Color` type is copied from https://github.com/BurntSushi/ripgrep/tree/master/termcolor ++ ++/// The set of available colors for the terminal foreground/background. ++/// ++/// The `Ansi256` and `Rgb` colors will only output the correct codes when ++/// paired with the `Ansi` `WriteColor` implementation. ++/// ++/// The `Ansi256` and `Rgb` color types are not supported when writing colors ++/// on Windows using the console. If they are used on Windows, then they are ++/// silently ignored and no colors will be emitted. ++/// ++/// This set may expand over time. ++/// ++/// This type has a `FromStr` impl that can parse colors from their human ++/// readable form. The format is as follows: ++/// ++/// 1. 
Any of the explicitly listed colors in English. They are matched ++/// case insensitively. ++/// 2. A single 8-bit integer, in either decimal or hexadecimal format. ++/// 3. A triple of 8-bit integers separated by a comma, where each integer is ++/// in decimal or hexadecimal format. ++/// ++/// Hexadecimal numbers are written with a `0x` prefix. ++#[allow(missing_docs)] ++#[derive(Clone, Debug, Eq, PartialEq)] ++pub enum Color { ++ Black, ++ Blue, ++ Green, ++ Red, ++ Cyan, ++ Magenta, ++ Yellow, ++ White, ++ Ansi256(u8), ++ Rgb(u8, u8, u8), ++ #[doc(hidden)] ++ __Nonexhaustive, ++} ++ ++/// An error from parsing an invalid color specification. ++#[derive(Clone, Debug, Eq, PartialEq)] ++pub struct ParseColorError(ParseColorErrorKind); ++ ++#[derive(Clone, Debug, Eq, PartialEq)] ++enum ParseColorErrorKind { ++ /// An error originating from `termcolor`. ++ TermColor(termcolor::ParseColorError), ++ /// An error converting the `termcolor` color to a `env_logger::Color`. ++ /// ++ /// This variant should only get reached if a user uses a new spec that's ++ /// valid for `termcolor`, but not recognised in `env_logger` yet. ++ Unrecognized { ++ given: String, ++ } ++} ++ ++impl ParseColorError { ++ fn termcolor(err: termcolor::ParseColorError) -> Self { ++ ParseColorError(ParseColorErrorKind::TermColor(err)) ++ } ++ ++ fn unrecognized(given: String) -> Self { ++ ParseColorError(ParseColorErrorKind::Unrecognized { given }) ++ } ++ ++ /// Return the string that couldn't be parsed as a valid color. ++ pub fn invalid(&self) -> &str { ++ match self.0 { ++ ParseColorErrorKind::TermColor(ref err) => err.invalid(), ++ ParseColorErrorKind::Unrecognized { ref given, .. } => given, ++ } ++ } ++} ++ ++impl Error for ParseColorError { ++ fn description(&self) -> &str { ++ match self.0 { ++ ParseColorErrorKind::TermColor(ref err) => err.description(), ++ ParseColorErrorKind::Unrecognized { .. } => "unrecognized color value", ++ } ++ } ++} ++ ++impl fmt::Display for ParseColorError { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ match self.0 { ++ ParseColorErrorKind::TermColor(ref err) => fmt::Display::fmt(err, f), ++ ParseColorErrorKind::Unrecognized { ref given, .. 
} => { ++ write!(f, "unrecognized color value '{}'", given) ++ } ++ } ++ } ++} ++ ++impl Color { ++ fn into_termcolor(self) -> Option { ++ match self { ++ Color::Black => Some(termcolor::Color::Black), ++ Color::Blue => Some(termcolor::Color::Blue), ++ Color::Green => Some(termcolor::Color::Green), ++ Color::Red => Some(termcolor::Color::Red), ++ Color::Cyan => Some(termcolor::Color::Cyan), ++ Color::Magenta => Some(termcolor::Color::Magenta), ++ Color::Yellow => Some(termcolor::Color::Yellow), ++ Color::White => Some(termcolor::Color::White), ++ Color::Ansi256(value) => Some(termcolor::Color::Ansi256(value)), ++ Color::Rgb(r, g, b) => Some(termcolor::Color::Rgb(r, g, b)), ++ _ => None, ++ } ++ } ++ ++ fn from_termcolor(color: termcolor::Color) -> Option { ++ match color { ++ termcolor::Color::Black => Some(Color::Black), ++ termcolor::Color::Blue => Some(Color::Blue), ++ termcolor::Color::Green => Some(Color::Green), ++ termcolor::Color::Red => Some(Color::Red), ++ termcolor::Color::Cyan => Some(Color::Cyan), ++ termcolor::Color::Magenta => Some(Color::Magenta), ++ termcolor::Color::Yellow => Some(Color::Yellow), ++ termcolor::Color::White => Some(Color::White), ++ termcolor::Color::Ansi256(value) => Some(Color::Ansi256(value)), ++ termcolor::Color::Rgb(r, g, b) => Some(Color::Rgb(r, g, b)), ++ _ => None, ++ } ++ } ++} ++ ++impl FromStr for Color { ++ type Err = ParseColorError; ++ ++ fn from_str(s: &str) -> Result { ++ let tc = termcolor::Color::from_str(s).map_err(ParseColorError::termcolor)?; ++ Color::from_termcolor(tc).ok_or_else(|| ParseColorError::unrecognized(s.into())) ++ } ++} ++ ++fn parse_write_style(spec: &str) -> WriteStyle { ++ match spec { ++ "auto" => WriteStyle::Auto, ++ "always" => WriteStyle::Always, ++ "never" => WriteStyle::Never, ++ _ => Default::default(), ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ ++ #[test] ++ fn parse_write_style_valid() { ++ let inputs = vec![ ++ ("auto", WriteStyle::Auto), ++ ("always", WriteStyle::Always), ++ ("never", WriteStyle::Never), ++ ]; ++ ++ for (input, expected) in inputs { ++ assert_eq!(expected, parse_write_style(input)); ++ } ++ } ++ ++ #[test] ++ fn parse_write_style_invalid() { ++ let inputs = vec![ ++ "", ++ "true", ++ "false", ++ "NEVER!!" ++ ]; ++ ++ for input in inputs { ++ assert_eq!(WriteStyle::Auto, parse_write_style(input)); ++ } ++ } ++ ++ #[test] ++ fn parse_color_name_valid() { ++ let inputs = vec![ ++ "black", ++ "blue", ++ "green", ++ "red", ++ "cyan", ++ "magenta", ++ "yellow", ++ "white", ++ ]; ++ ++ for input in inputs { ++ assert!(Color::from_str(input).is_ok()); ++ } ++ } ++ ++ #[test] ++ fn parse_color_ansi_valid() { ++ let inputs = vec![ ++ "7", ++ "32", ++ "0xFF", ++ ]; ++ ++ for input in inputs { ++ assert!(Color::from_str(input).is_ok()); ++ } ++ } ++ ++ #[test] ++ fn parse_color_rgb_valid() { ++ let inputs = vec![ ++ "0,0,0", ++ "0,128,255", ++ "0x0,0x0,0x0", ++ "0x33,0x66,0xFF", ++ ]; ++ ++ for input in inputs { ++ assert!(Color::from_str(input).is_ok()); ++ } ++ } ++ ++ #[test] ++ fn parse_color_invalid() { ++ let inputs = vec![ ++ "not_a_color", ++ "256", ++ "0,0", ++ "0,0,256", ++ ]; ++ ++ for input in inputs { ++ let err = Color::from_str(input).unwrap_err(); ++ assert_eq!(input, err.invalid()); ++ } ++ } ++} diff --cc vendor/env_logger-0.5.12/src/lib.rs index 000000000,000000000..b5e445c81 new file mode 100644 --- /dev/null +++ b/vendor/env_logger-0.5.12/src/lib.rs @@@ -1,0 -1,0 +1,1134 @@@ ++// Copyright 2014-2015 The Rust Project Developers. 
See the COPYRIGHT ++// file at the top-level directory of this distribution and at ++// http://rust-lang.org/COPYRIGHT. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++//! A simple logger configured via environment variables which writes ++//! to stdout or stderr, for use with the logging facade exposed by the ++//! [`log` crate][log-crate-url]. ++//! ++//! ## Example ++//! ++//! ``` ++//! #[macro_use] extern crate log; ++//! extern crate env_logger; ++//! ++//! use log::Level; ++//! ++//! fn main() { ++//! env_logger::init(); ++//! ++//! debug!("this is a debug {}", "message"); ++//! error!("this is printed by default"); ++//! ++//! if log_enabled!(Level::Info) { ++//! let x = 3 * 4; // expensive computation ++//! info!("the answer was: {}", x); ++//! } ++//! } ++//! ``` ++//! ++//! Assumes the binary is `main`: ++//! ++//! ```{.bash} ++//! $ RUST_LOG=error ./main ++//! ERROR: 2017-11-09T02:12:24Z: main: this is printed by default ++//! ``` ++//! ++//! ```{.bash} ++//! $ RUST_LOG=info ./main ++//! ERROR: 2017-11-09T02:12:24Z: main: this is printed by default ++//! INFO: 2017-11-09T02:12:24Z: main: the answer was: 12 ++//! ``` ++//! ++//! ```{.bash} ++//! $ RUST_LOG=debug ./main ++//! DEBUG: 2017-11-09T02:12:24Z: main: this is a debug message ++//! ERROR: 2017-11-09T02:12:24Z: main: this is printed by default ++//! INFO: 2017-11-09T02:12:24Z: main: the answer was: 12 ++//! ``` ++//! ++//! You can also set the log level on a per module basis: ++//! ++//! ```{.bash} ++//! $ RUST_LOG=main=info ./main ++//! ERROR: 2017-11-09T02:12:24Z: main: this is printed by default ++//! INFO: 2017-11-09T02:12:24Z: main: the answer was: 12 ++//! ``` ++//! ++//! And enable all logging: ++//! ++//! ```{.bash} ++//! $ RUST_LOG=main ./main ++//! DEBUG: 2017-11-09T02:12:24Z: main: this is a debug message ++//! ERROR: 2017-11-09T02:12:24Z: main: this is printed by default ++//! INFO: 2017-11-09T02:12:24Z: main: the answer was: 12 ++//! ``` ++//! ++//! If the binary name contains hyphens, you will need to replace ++//! them with underscores: ++//! ++//! ```{.bash} ++//! $ RUST_LOG=my_app ./my-app ++//! DEBUG: 2017-11-09T02:12:24Z: my_app: this is a debug message ++//! ERROR: 2017-11-09T02:12:24Z: my_app: this is printed by default ++//! INFO: 2017-11-09T02:12:24Z: my_app: the answer was: 12 ++//! ``` ++//! ++//! This is because Rust modules and crates cannot contain hyphens ++//! in their name, although `cargo` continues to accept them. ++//! ++//! See the documentation for the [`log` crate][log-crate-url] for more ++//! information about its API. ++//! ++//! ## Enabling logging ++//! ++//! Log levels are controlled on a per-module basis, and by default all logging ++//! is disabled except for `error!`. Logging is controlled via the `RUST_LOG` ++//! environment variable. The value of this environment variable is a ++//! comma-separated list of logging directives. A logging directive is of the ++//! form: ++//! ++//! ```text ++//! path::to::module=level ++//! ``` ++//! ++//! The path to the module is rooted in the name of the crate it was compiled ++//! for, so if your program is contained in a file `hello.rs`, for example, to ++//! turn on logging for this file you would use a value of `RUST_LOG=hello`. ++//! Furthermore, this path is a prefix-search, so all modules nested in the ++//! specified module will also have logging enabled. ++//! ++//! 
The actual `level` is optional to specify. If omitted, all logging will ++//! be enabled. If specified, it must be one of the strings `debug`, `error`, ++//! `info`, `warn`, or `trace`. ++//! ++//! As the log level for a module is optional, the module to enable logging for ++//! is also optional. If only a `level` is provided, then the global log ++//! level for all modules is set to this value. ++//! ++//! Some examples of valid values of `RUST_LOG` are: ++//! ++//! * `hello` turns on all logging for the 'hello' module ++//! * `info` turns on all info logging ++//! * `hello=debug` turns on debug logging for 'hello' ++//! * `hello,std::option` turns on hello, and std's option logging ++//! * `error,hello=warn` turn on global error logging and also warn for hello ++//! ++//! ## Filtering results ++//! ++//! A `RUST_LOG` directive may include a regex filter. The syntax is to append `/` ++//! followed by a regex. Each message is checked against the regex, and is only ++//! logged if it matches. Note that the matching is done after formatting the ++//! log string but before adding any logging meta-data. There is a single filter ++//! for all modules. ++//! ++//! Some examples: ++//! ++//! * `hello/foo` turns on all logging for the 'hello' module where the log ++//! message includes 'foo'. ++//! * `info/f.o` turns on all info logging where the log message includes 'foo', ++//! 'f1o', 'fao', etc. ++//! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log ++//! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc. ++//! * `error,hello=warn/[0-9]scopes` turn on global error logging and also ++//! warn for hello. In both cases the log message must include a single digit ++//! number followed by 'scopes'. ++//! ++//! ## Disabling colors ++//! ++//! Colors and other styles can be configured with the `RUST_LOG_STYLE` ++//! environment variable. It accepts the following values: ++//! ++//! * `auto` (default) will attempt to print style characters, but don't force the issue. ++//! If the console isn't available on Windows, or if TERM=dumb, for example, then don't print colors. ++//! * `always` will always print style characters even if they aren't supported by the terminal. ++//! This includes emitting ANSI colors on Windows if the console API is unavailable. ++//! * `never` will never print style characters. ++//! ++//! ## Tweaking the default format ++//! ++//! Parts of the default format can be excluded from the log output using the [`Builder`]. ++//! The following example excluding the timestamp from the log output: ++//! ++//! ``` ++//! #[macro_use] extern crate log; ++//! extern crate env_logger; ++//! ++//! use log::Level; ++//! ++//! fn main() { ++//! env_logger::Builder::from_default_env() ++//! .default_format_timestamp(false) ++//! .init(); ++//! ++//! debug!("this is a debug {}", "message"); ++//! error!("this is printed by default"); ++//! ++//! if log_enabled!(Level::Info) { ++//! let x = 3 * 4; // expensive computation ++//! info!("the answer was: {}", x); ++//! } ++//! } ++//! ``` ++//! ++//! ## Specifying defaults for environment variables ++//! ++//! `env_logger` can read configuration from environment variables. ++//! If these variables aren't present, the default value to use can be tweaked with the [`Env`] type. ++//! The following example defaults to log `warn` and above if the `RUST_LOG` environment variable ++//! isn't set: ++//! ++//! ``` ++//! #[macro_use] extern crate log; ++//! extern crate env_logger; ++//! ++//! use log::Level; ++//! ++//! 
fn main() { ++//! let env = env_logger::Env::default() ++//! .filter_or(env_logger::DEFAULT_FILTER_ENV, "warn"); ++//! ++//! env_logger::Builder::from_env(env).init(); ++//! } ++//! ``` ++//! ++//! [log-crate-url]: https://docs.rs/log/ ++//! [`Builder`]: struct.Builder.html ++//! [`Env`]: struct.Env.html ++ ++#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", ++ html_favicon_url = "http://www.rust-lang.org/favicon.ico", ++ html_root_url = "https://docs.rs/env_logger/0.5.12")] ++#![cfg_attr(test, deny(warnings))] ++ ++// When compiled for the rustc compiler itself we want to make sure that this is ++// an unstable crate ++#![cfg_attr(rustbuild, feature(staged_api, rustc_private))] ++#![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))] ++ ++#![deny(missing_debug_implementations, missing_docs, warnings)] ++ ++extern crate log; ++extern crate termcolor; ++extern crate humantime; ++extern crate atty; ++ ++use std::env; ++use std::borrow::Cow; ++use std::io::prelude::*; ++use std::io; ++use std::mem; ++use std::cell::RefCell; ++ ++use log::{Log, LevelFilter, Level, Record, SetLoggerError, Metadata}; ++ ++pub mod filter; ++pub mod fmt; ++ ++pub use self::fmt::{Target, WriteStyle, Color, Formatter}; ++ ++/// The default name for the environment variable to read filters from. ++pub const DEFAULT_FILTER_ENV: &'static str = "RUST_LOG"; ++ ++/// The default name for the environment variable to read style preferences from. ++pub const DEFAULT_WRITE_STYLE_ENV: &'static str = "RUST_LOG_STYLE"; ++ ++/// Set of environment variables to configure from. ++/// ++/// # Default environment variables ++/// ++/// By default, the `Env` will read the following environment variables: ++/// ++/// - `RUST_LOG`: the level filter ++/// - `RUST_LOG_STYLE`: whether or not to print styles with records. ++/// ++/// These sources can be configured using the builder methods on `Env`. ++#[derive(Debug)] ++pub struct Env<'a> { ++ filter: Var<'a>, ++ write_style: Var<'a>, ++} ++ ++#[derive(Debug)] ++struct Var<'a> { ++ name: Cow<'a, str>, ++ default: Option>, ++} ++ ++/// The env logger. ++/// ++/// This struct implements the `Log` trait from the [`log` crate][log-crate-url], ++/// which allows it to act as a logger. ++/// ++/// The [`init()`], [`try_init()`], [`Builder::init()`] and [`Builder::try_init()`] ++/// methods will each construct a `Logger` and immediately initialize it as the ++/// default global logger. ++/// ++/// If you'd instead need access to the constructed `Logger`, you can use ++/// the associated [`Builder`] and install it with the ++/// [`log` crate][log-crate-url] directly. 
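++///
++/// # Examples
++///
++/// Build a logger and install it by hand (a minimal sketch; this mirrors what
++/// [`Builder::try_init()`] does internally):
++///
++/// ```
++/// extern crate log;
++/// extern crate env_logger;
++///
++/// let logger = env_logger::Builder::new().build();
++///
++/// log::set_max_level(logger.filter());
++/// log::set_boxed_logger(Box::new(logger)).unwrap();
++/// ```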
++///
++/// [log-crate-url]: https://docs.rs/log/
++/// [`init()`]: fn.init.html
++/// [`try_init()`]: fn.try_init.html
++/// [`Builder::init()`]: struct.Builder.html#method.init
++/// [`Builder::try_init()`]: struct.Builder.html#method.try_init
++/// [`Builder`]: struct.Builder.html
++pub struct Logger {
++    writer: fmt::Writer,
++    filter: filter::Filter,
++    format: Box<Fn(&mut Formatter, &Record) -> io::Result<()> + Sync + Send>,
++}
++
++struct Format {
++    default_format_timestamp: bool,
++    default_format_module_path: bool,
++    default_format_level: bool,
++    default_format_timestamp_nanos: bool,
++    custom_format: Option<Box<Fn(&mut Formatter, &Record) -> io::Result<()> + Sync + Send>>,
++}
++
++impl Default for Format {
++    fn default() -> Self {
++        Format {
++            default_format_timestamp: true,
++            default_format_module_path: true,
++            default_format_level: true,
++            default_format_timestamp_nanos: false,
++            custom_format: None,
++        }
++    }
++}
++
++impl Format {
++    /// Convert the format into a callable function.
++    ///
++    /// If the `custom_format` is `Some`, then any `default_format` switches are ignored.
++    /// If the `custom_format` is `None`, then a default format is returned.
++    /// Any `default_format` switches set to `false` won't be written by the format.
++    fn into_boxed_fn(self) -> Box<Fn(&mut Formatter, &Record) -> io::Result<()> + Sync + Send> {
++        if let Some(fmt) = self.custom_format {
++            fmt
++        }
++        else {
++            Box::new(move |buf, record| {
++                let write_level = if self.default_format_level {
++                    let level = record.level();
++                    let mut level_style = buf.style();
++
++                    match level {
++                        Level::Trace => level_style.set_color(Color::White),
++                        Level::Debug => level_style.set_color(Color::Blue),
++                        Level::Info => level_style.set_color(Color::Green),
++                        Level::Warn => level_style.set_color(Color::Yellow),
++                        Level::Error => level_style.set_color(Color::Red).set_bold(true),
++                    };
++
++                    write!(buf, "{:>5} ", level_style.value(level))
++                } else {
++                    Ok(())
++                };
++
++                let write_ts = if self.default_format_timestamp {
++                    if self.default_format_timestamp_nanos {
++                        let ts_nanos = buf.precise_timestamp();
++                        write!(buf, "{}: ", ts_nanos)
++                    } else {
++                        let ts = buf.timestamp();
++                        write!(buf, "{}: ", ts)
++                    }
++                } else {
++                    Ok(())
++                };
++
++                let default_format_module_path = (self.default_format_module_path, record.module_path());
++                let write_module_path = if let (true, Some(module_path)) = default_format_module_path {
++                    write!(buf, "{}: ", module_path)
++                } else {
++                    Ok(())
++                };
++
++                let write_args = writeln!(buf, "{}", record.args());
++
++                write_level.and(write_ts).and(write_module_path).and(write_args)
++            })
++        }
++    }
++}
++
++/// `Builder` acts as a builder for initializing a `Logger`.
++///
++/// It can be used to customize the log format, change the environment variable used
++/// to provide the logging directives and also set the default log level filter.
++///
++/// # Examples
++///
++/// ```
++/// #[macro_use]
++/// extern crate log;
++/// extern crate env_logger;
++///
++/// use std::env;
++/// use std::io::Write;
++/// use log::LevelFilter;
++/// use env_logger::Builder;
++///
++/// fn main() {
++///     let mut builder = Builder::from_default_env();
++///
++///     builder.format(|buf, record| writeln!(buf, "{} - {}", record.level(), record.args()))
++///         .filter(None, LevelFilter::Info)
++///         .init();
++///
++///     error!("error message");
++///     info!("info message");
++/// }
++/// ```
++#[derive(Default)]
++pub struct Builder {
++    filter: filter::Builder,
++    writer: fmt::Builder,
++    format: Format,
++}
++
++impl Builder {
++    /// Initializes the log builder with defaults.
++ /// ++ /// **NOTE:** This method won't read from any environment variables. ++ /// Use the [`filter`] and [`write_style`] methods to configure the builder ++ /// or use [`from_env`] or [`from_default_env`] instead. ++ /// ++ /// # Examples ++ /// ++ /// Create a new builder and configure filters and style: ++ /// ++ /// ``` ++ /// # extern crate log; ++ /// # extern crate env_logger; ++ /// # fn main() { ++ /// use log::LevelFilter; ++ /// use env_logger::{Builder, WriteStyle}; ++ /// ++ /// let mut builder = Builder::new(); ++ /// ++ /// builder.filter(None, LevelFilter::Info) ++ /// .write_style(WriteStyle::Always) ++ /// .init(); ++ /// # } ++ /// ``` ++ /// ++ /// [`filter`]: #method.filter ++ /// [`write_style`]: #method.write_style ++ /// [`from_env`]: #method.from_env ++ /// [`from_default_env`]: #method.from_default_env ++ pub fn new() -> Builder { ++ Default::default() ++ } ++ ++ /// Initializes the log builder from the environment. ++ /// ++ /// The variables used to read configuration from can be tweaked before ++ /// passing in. ++ /// ++ /// # Examples ++ /// ++ /// Initialise a logger reading the log filter from an environment variable ++ /// called `MY_LOG`: ++ /// ++ /// ``` ++ /// use env_logger::Builder; ++ /// ++ /// let mut builder = Builder::from_env("MY_LOG"); ++ /// builder.init(); ++ /// ``` ++ /// ++ /// Initialise a logger using the `MY_LOG` variable for filtering and ++ /// `MY_LOG_STYLE` for whether or not to write styles: ++ /// ++ /// ``` ++ /// use env_logger::{Builder, Env}; ++ /// ++ /// let env = Env::new().filter("MY_LOG").write_style("MY_LOG_STYLE"); ++ /// ++ /// let mut builder = Builder::from_env(env); ++ /// builder.init(); ++ /// ``` ++ pub fn from_env<'a, E>(env: E) -> Self ++ where ++ E: Into> ++ { ++ let mut builder = Builder::new(); ++ let env = env.into(); ++ ++ if let Some(s) = env.get_filter() { ++ builder.parse(&s); ++ } ++ ++ if let Some(s) = env.get_write_style() { ++ builder.parse_write_style(&s); ++ } ++ ++ builder ++ } ++ ++ /// Initializes the log builder from the environment using default variable names. ++ /// ++ /// This method is a convenient way to call `from_env(Env::default())` without ++ /// having to use the `Env` type explicitly. The builder will use the ++ /// [default environment variables]. ++ /// ++ /// # Examples ++ /// ++ /// Initialise a logger using the default environment variables: ++ /// ++ /// ``` ++ /// use env_logger::Builder; ++ /// ++ /// let mut builder = Builder::from_default_env(); ++ /// builder.init(); ++ /// ``` ++ /// ++ /// [default environment variables]: struct.Env.html#default-environment-variables ++ pub fn from_default_env() -> Self { ++ Self::from_env(Env::default()) ++ } ++ ++ /// Sets the format function for formatting the log output. ++ /// ++ /// This function is called on each record logged and should format the ++ /// log record and output it to the given [`Formatter`]. ++ /// ++ /// The format function is expected to output the string directly to the ++ /// `Formatter` so that implementations can use the [`std::fmt`] macros ++ /// to format and output without intermediate heap allocations. The default ++ /// `env_logger` formatter takes advantage of this. 
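++    ///
++    /// Note that any error returned by the format function is silently
++    /// discarded when the record is written.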
++ /// ++ /// # Examples ++ /// ++ /// Use a custom format to write only the log message: ++ /// ++ /// ``` ++ /// use std::io::Write; ++ /// use env_logger::Builder; ++ /// ++ /// let mut builder = Builder::new(); ++ /// ++ /// builder.format(|buf, record| write!(buf, "{}", record.args())); ++ /// ``` ++ /// ++ /// [`Formatter`]: fmt/struct.Formatter.html ++ /// [`String`]: https://doc.rust-lang.org/stable/std/string/struct.String.html ++ /// [`std::fmt`]: https://doc.rust-lang.org/std/fmt/index.html ++ pub fn format(&mut self, format: F) -> &mut Self ++ where F: Fn(&mut Formatter, &Record) -> io::Result<()> + Sync + Send ++ { ++ self.format.custom_format = Some(Box::new(format)); ++ self ++ } ++ ++ /// Use the default format. ++ /// ++ /// This method will clear any custom format set on the builder. ++ pub fn default_format(&mut self) -> &mut Self { ++ self.format.custom_format = None; ++ self ++ } ++ ++ /// Whether or not to write the level in the default format. ++ pub fn default_format_level(&mut self, write: bool) -> &mut Self { ++ self.format.default_format_level = write; ++ self ++ } ++ ++ /// Whether or not to write the module path in the default format. ++ pub fn default_format_module_path(&mut self, write: bool) -> &mut Self { ++ self.format.default_format_module_path = write; ++ self ++ } ++ ++ /// Whether or not to write the timestamp in the default format. ++ pub fn default_format_timestamp(&mut self, write: bool) -> &mut Self { ++ self.format.default_format_timestamp = write; ++ self ++ } ++ ++ /// Whether or not to write the timestamp with nanos. ++ pub fn default_format_timestamp_nanos(&mut self, write: bool) -> &mut Self { ++ self.format.default_format_timestamp_nanos = write; ++ self ++ } ++ ++ /// Adds a directive to the filter for a specific module. ++ /// ++ /// # Examples ++ /// ++ /// Only include messages for warning and above for logs in `path::to::module`: ++ /// ++ /// ``` ++ /// # extern crate log; ++ /// # extern crate env_logger; ++ /// # fn main() { ++ /// use log::LevelFilter; ++ /// use env_logger::Builder; ++ /// ++ /// let mut builder = Builder::new(); ++ /// ++ /// builder.filter_module("path::to::module", LevelFilter::Info); ++ /// # } ++ /// ``` ++ pub fn filter_module(&mut self, module: &str, level: LevelFilter) -> &mut Self { ++ self.filter.filter_module(module, level); ++ self ++ } ++ ++ /// Adds a directive to the filter for all modules. ++ /// ++ /// # Examples ++ /// ++ /// Only include messages for warning and above for logs in `path::to::module`: ++ /// ++ /// ``` ++ /// # extern crate log; ++ /// # extern crate env_logger; ++ /// # fn main() { ++ /// use log::LevelFilter; ++ /// use env_logger::Builder; ++ /// ++ /// let mut builder = Builder::new(); ++ /// ++ /// builder.filter_level(LevelFilter::Info); ++ /// # } ++ /// ``` ++ pub fn filter_level(&mut self, level: LevelFilter) -> &mut Self { ++ self.filter.filter_level(level); ++ self ++ } ++ ++ /// Adds filters to the logger. ++ /// ++ /// The given module (if any) will log at most the specified level provided. ++ /// If no module is provided then the filter will apply to all log messages. 
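++    ///
++    /// Passing `None` as the module is equivalent to calling [`filter_level`].
++    ///
++    /// [`filter_level`]: #method.filter_level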
++ /// ++ /// # Examples ++ /// ++ /// Only include messages for warning and above for logs in `path::to::module`: ++ /// ++ /// ``` ++ /// # extern crate log; ++ /// # extern crate env_logger; ++ /// # fn main() { ++ /// use log::LevelFilter; ++ /// use env_logger::Builder; ++ /// ++ /// let mut builder = Builder::new(); ++ /// ++ /// builder.filter(Some("path::to::module"), LevelFilter::Info); ++ /// # } ++ /// ``` ++ pub fn filter(&mut self, ++ module: Option<&str>, ++ level: LevelFilter) -> &mut Self { ++ self.filter.filter(module, level); ++ self ++ } ++ ++ /// Parses the directives string in the same form as the `RUST_LOG` ++ /// environment variable. ++ /// ++ /// See the module documentation for more details. ++ pub fn parse(&mut self, filters: &str) -> &mut Self { ++ self.filter.parse(filters); ++ self ++ } ++ ++ /// Sets the target for the log output. ++ /// ++ /// Env logger can log to either stdout or stderr. The default is stderr. ++ /// ++ /// # Examples ++ /// ++ /// Write log message to `stdout`: ++ /// ++ /// ``` ++ /// use env_logger::{Builder, Target}; ++ /// ++ /// let mut builder = Builder::new(); ++ /// ++ /// builder.target(Target::Stdout); ++ /// ``` ++ pub fn target(&mut self, target: fmt::Target) -> &mut Self { ++ self.writer.target(target); ++ self ++ } ++ ++ /// Sets whether or not styles will be written. ++ /// ++ /// This can be useful in environments that don't support control characters ++ /// for setting colors. ++ /// ++ /// # Examples ++ /// ++ /// Never attempt to write styles: ++ /// ++ /// ``` ++ /// use env_logger::{Builder, WriteStyle}; ++ /// ++ /// let mut builder = Builder::new(); ++ /// ++ /// builder.write_style(WriteStyle::Never); ++ /// ``` ++ pub fn write_style(&mut self, write_style: fmt::WriteStyle) -> &mut Self { ++ self.writer.write_style(write_style); ++ self ++ } ++ ++ /// Parses whether or not to write styles in the same form as the `RUST_LOG_STYLE` ++ /// environment variable. ++ /// ++ /// See the module documentation for more details. ++ pub fn parse_write_style(&mut self, write_style: &str) -> &mut Self { ++ self.writer.parse(write_style); ++ self ++ } ++ ++ /// Initializes the global logger with the built env logger. ++ /// ++ /// This should be called early in the execution of a Rust program. Any log ++ /// events that occur before initialization will be ignored. ++ /// ++ /// # Errors ++ /// ++ /// This function will fail if it is called more than once, or if another ++ /// library has already initialized a global logger. ++ pub fn try_init(&mut self) -> Result<(), SetLoggerError> { ++ let logger = self.build(); ++ ++ log::set_max_level(logger.filter()); ++ log::set_boxed_logger(Box::new(logger)) ++ } ++ ++ /// Initializes the global logger with the built env logger. ++ /// ++ /// This should be called early in the execution of a Rust program. Any log ++ /// events that occur before initialization will be ignored. ++ /// ++ /// # Panics ++ /// ++ /// This function will panic if it is called more than once, or if another ++ /// library has already initialized a global logger. ++ pub fn init(&mut self) { ++ self.try_init().expect("Builder::init should not be called after logger initialized"); ++ } ++ ++ /// Build an env logger. ++ /// ++ /// The returned logger implements the `Log` trait and can be installed manually ++ /// or nested within another logger. 
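++    ///
++    /// # Examples
++    ///
++    /// Build a logger and inspect the maximum level it was configured with
++    /// (a minimal sketch):
++    ///
++    /// ```
++    /// # extern crate log;
++    /// # extern crate env_logger;
++    /// # fn main() {
++    /// use log::LevelFilter;
++    /// use env_logger::Builder;
++    ///
++    /// let mut builder = Builder::new();
++    /// builder.filter_level(LevelFilter::Info);
++    ///
++    /// let logger = builder.build();
++    /// assert_eq!(LevelFilter::Info, logger.filter());
++    /// # }
++    /// ```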
++ pub fn build(&mut self) -> Logger { ++ Logger { ++ writer: self.writer.build(), ++ filter: self.filter.build(), ++ format: mem::replace(&mut self.format, Default::default()).into_boxed_fn(), ++ } ++ } ++} ++ ++impl Logger { ++ /// Creates the logger from the environment. ++ /// ++ /// The variables used to read configuration from can be tweaked before ++ /// passing in. ++ /// ++ /// # Examples ++ /// ++ /// Create a logger reading the log filter from an environment variable ++ /// called `MY_LOG`: ++ /// ++ /// ``` ++ /// use env_logger::Logger; ++ /// ++ /// let logger = Logger::from_env("MY_LOG"); ++ /// ``` ++ /// ++ /// Create a logger using the `MY_LOG` variable for filtering and ++ /// `MY_LOG_STYLE` for whether or not to write styles: ++ /// ++ /// ``` ++ /// use env_logger::{Logger, Env}; ++ /// ++ /// let env = Env::new().filter_or("MY_LOG", "info").write_style_or("MY_LOG_STYLE", "always"); ++ /// ++ /// let logger = Logger::from_env(env); ++ /// ``` ++ pub fn from_env<'a, E>(env: E) -> Self ++ where ++ E: Into> ++ { ++ Builder::from_env(env).build() ++ } ++ ++ /// Creates the logger from the environment using default variable names. ++ /// ++ /// This method is a convenient way to call `from_env(Env::default())` without ++ /// having to use the `Env` type explicitly. The logger will use the ++ /// [default environment variables]. ++ /// ++ /// # Examples ++ /// ++ /// Creates a logger using the default environment variables: ++ /// ++ /// ``` ++ /// use env_logger::Logger; ++ /// ++ /// let logger = Logger::from_default_env(); ++ /// ``` ++ /// ++ /// [default environment variables]: struct.Env.html#default-environment-variables ++ pub fn from_default_env() -> Self { ++ Builder::from_default_env().build() ++ } ++ ++ /// Returns the maximum `LevelFilter` that this env logger instance is ++ /// configured to output. ++ pub fn filter(&self) -> LevelFilter { ++ self.filter.filter() ++ } ++ ++ /// Checks if this record matches the configured filter. ++ pub fn matches(&self, record: &Record) -> bool { ++ self.filter.matches(record) ++ } ++} ++ ++impl Log for Logger { ++ fn enabled(&self, metadata: &Metadata) -> bool { ++ self.filter.enabled(metadata) ++ } ++ ++ fn log(&self, record: &Record) { ++ if self.matches(record) { ++ // Log records are written to a thread-local buffer before being printed ++ // to the terminal. We clear these buffers afterwards, but they aren't shrinked ++ // so will always at least have capacity for the largest log record formatted ++ // on that thread. ++ // ++ // If multiple `Logger`s are used by the same threads then the thread-local ++ // formatter might have different color support. If this is the case the ++ // formatter and its buffer are discarded and recreated. ++ ++ thread_local! { ++ static FORMATTER: RefCell> = RefCell::new(None); ++ } ++ ++ FORMATTER.with(|tl_buf| { ++ // It's possible for implementations to sometimes ++ // log-while-logging (e.g. a `std::fmt` implementation logs ++ // internally) but it's super rare. If this happens make sure we ++ // at least don't panic and ship some output to the screen. ++ let mut a; ++ let mut b = None; ++ let tl_buf = match tl_buf.try_borrow_mut() { ++ Ok(f) => { ++ a = f; ++ &mut *a ++ } ++ Err(_) => &mut b, ++ }; ++ ++ // Check the buffer style. If it's different from the logger's ++ // style then drop the buffer and recreate it. 
++                match *tl_buf {
++                    Some(ref mut formatter) => {
++                        if formatter.write_style() != self.writer.write_style() {
++                            *formatter = Formatter::new(&self.writer)
++                        }
++                    },
++                    ref mut tl_buf => *tl_buf = Some(Formatter::new(&self.writer))
++                }
++
++                // The format is guaranteed to be `Some` by this point
++                let mut formatter = tl_buf.as_mut().unwrap();
++
++                let _ = (self.format)(&mut formatter, record).and_then(|_| formatter.print(&self.writer));
++
++                // Always clear the buffer afterwards
++                formatter.clear();
++            });
++        }
++    }
++
++    fn flush(&self) {}
++}
++
++impl<'a> Env<'a> {
++    /// Get a default set of environment variables.
++    pub fn new() -> Self {
++        Self::default()
++    }
++
++    /// Specify an environment variable to read the filter from.
++    pub fn filter<E>(mut self, filter_env: E) -> Self
++    where
++        E: Into<Cow<'a, str>>
++    {
++        self.filter = Var::new(filter_env);
++
++        self
++    }
++
++    /// Specify an environment variable to read the filter from.
++    ///
++    /// If the variable is not set, the default value will be used.
++    pub fn filter_or<E, V>(mut self, filter_env: E, default: V) -> Self
++    where
++        E: Into<Cow<'a, str>>,
++        V: Into<Cow<'a, str>>,
++    {
++        self.filter = Var::new_with_default(filter_env, default);
++
++        self
++    }
++
++    fn get_filter(&self) -> Option<String> {
++        self.filter.get()
++    }
++
++    /// Specify an environment variable to read the style from.
++    pub fn write_style<E>(mut self, write_style_env: E) -> Self
++    where
++        E: Into<Cow<'a, str>>
++    {
++        self.write_style = Var::new(write_style_env);
++
++        self
++    }
++
++    /// Specify an environment variable to read the style from.
++    ///
++    /// If the variable is not set, the default value will be used.
++    pub fn write_style_or<E, V>(mut self, write_style_env: E, default: V) -> Self
++    where
++        E: Into<Cow<'a, str>>,
++        V: Into<Cow<'a, str>>,
++    {
++        self.write_style = Var::new_with_default(write_style_env, default);
++
++        self
++    }
++
++    fn get_write_style(&self) -> Option<String> {
++        self.write_style.get()
++    }
++}
++
++impl<'a> Var<'a> {
++    fn new<E>(name: E) -> Self
++    where
++        E: Into<Cow<'a, str>>,
++    {
++        Var {
++            name: name.into(),
++            default: None,
++        }
++    }
++
++    fn new_with_default<E, V>(name: E, default: V) -> Self
++    where
++        E: Into<Cow<'a, str>>,
++        V: Into<Cow<'a, str>>,
++    {
++        Var {
++            name: name.into(),
++            default: Some(default.into()),
++        }
++    }
++
++    fn get(&self) -> Option<String> {
++        env::var(&*self.name)
++            .ok()
++            .or_else(|| self.default
++                .to_owned()
++                .map(|v| v.into_owned()))
++    }
++}
++
++impl<'a, T> From<T> for Env<'a>
++where
++    T: Into<Cow<'a, str>>
++{
++    fn from(filter_env: T) -> Self {
++        Env::default().filter(filter_env.into())
++    }
++}
++
++impl<'a> Default for Env<'a> {
++    fn default() -> Self {
++        Env {
++            filter: Var::new(DEFAULT_FILTER_ENV),
++            write_style: Var::new(DEFAULT_WRITE_STYLE_ENV),
++        }
++    }
++}
++
++mod std_fmt_impls {
++    use std::fmt;
++    use super::*;
++
++    impl fmt::Debug for Logger {
++        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++            f.debug_struct("Logger")
++                .field("filter", &self.filter)
++                .finish()
++        }
++    }
++
++    impl fmt::Debug for Builder {
++        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++            f.debug_struct("Logger")
++                .field("filter", &self.filter)
++                .field("writer", &self.writer)
++                .finish()
++        }
++    }
++}
++
++/// Attempts to initialize the global logger with an env logger.
++///
++/// This should be called early in the execution of a Rust program. Any log
++/// events that occur before initialization will be ignored.
++///
++/// # Errors
++///
++/// This function will fail if it is called more than once, or if another
++/// library has already initialized a global logger.
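++///
++/// # Examples
++///
++/// Attempt to initialize the logger and report the failure if another logger
++/// is already installed (a minimal sketch):
++///
++/// ```
++/// if let Err(err) = env_logger::try_init() {
++///     eprintln!("failed to initialize env_logger: {}", err);
++/// }
++/// ```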
++pub fn try_init() -> Result<(), SetLoggerError> { ++ try_init_from_env(Env::default()) ++} ++ ++/// Initializes the global logger with an env logger. ++/// ++/// This should be called early in the execution of a Rust program. Any log ++/// events that occur before initialization will be ignored. ++/// ++/// # Panics ++/// ++/// This function will panic if it is called more than once, or if another ++/// library has already initialized a global logger. ++pub fn init() { ++ try_init().expect("env_logger::init should not be called after logger initialized"); ++} ++ ++/// Attempts to initialize the global logger with an env logger from the given ++/// environment variables. ++/// ++/// This should be called early in the execution of a Rust program. Any log ++/// events that occur before initialization will be ignored. ++/// ++/// # Examples ++/// ++/// Initialise a logger using the `MY_LOG` environment variable for filters ++/// and `MY_LOG_STYLE` for writing colors: ++/// ++/// ``` ++/// # extern crate env_logger; ++/// use env_logger::{Builder, Env}; ++/// ++/// # fn run() -> Result<(), Box<::std::error::Error>> { ++/// let env = Env::new().filter("MY_LOG").write_style("MY_LOG_STYLE"); ++/// ++/// env_logger::try_init_from_env(env)?; ++/// ++/// Ok(()) ++/// # } ++/// # fn main() { run().unwrap(); } ++/// ``` ++/// ++/// # Errors ++/// ++/// This function will fail if it is called more than once, or if another ++/// library has already initialized a global logger. ++pub fn try_init_from_env<'a, E>(env: E) -> Result<(), SetLoggerError> ++where ++ E: Into> ++{ ++ let mut builder = Builder::from_env(env); ++ ++ builder.try_init() ++} ++ ++/// Initializes the global logger with an env logger from the given environment ++/// variables. ++/// ++/// This should be called early in the execution of a Rust program. Any log ++/// events that occur before initialization will be ignored. ++/// ++/// # Examples ++/// ++/// Initialise a logger using the `MY_LOG` environment variable for filters ++/// and `MY_LOG_STYLE` for writing colors: ++/// ++/// ``` ++/// use env_logger::{Builder, Env}; ++/// ++/// let env = Env::new().filter("MY_LOG").write_style("MY_LOG_STYLE"); ++/// ++/// env_logger::init_from_env(env); ++/// ``` ++/// ++/// # Panics ++/// ++/// This function will panic if it is called more than once, or if another ++/// library has already initialized a global logger. 
++
++/// Initializes the global logger with an env logger from the given environment
++/// variables.
++///
++/// This should be called early in the execution of a Rust program. Any log
++/// events that occur before initialization will be ignored.
++///
++/// # Examples
++///
++/// Initialise a logger using the `MY_LOG` environment variable for filters
++/// and `MY_LOG_STYLE` for writing colors:
++///
++/// ```
++/// use env_logger::{Builder, Env};
++///
++/// let env = Env::new().filter("MY_LOG").write_style("MY_LOG_STYLE");
++///
++/// env_logger::init_from_env(env);
++/// ```
++///
++/// # Panics
++///
++/// This function will panic if it is called more than once, or if another
++/// library has already initialized a global logger.
++pub fn init_from_env<'a, E>(env: E)
++where
++    E: Into<Env<'a>>
++{
++    try_init_from_env(env).expect("env_logger::init_from_env should not be called after logger initialized");
++}
++
++#[cfg(test)]
++mod tests {
++    use super::*;
++
++    #[test]
++    fn env_get_filter_reads_from_var_if_set() {
++        env::set_var("env_get_filter_reads_from_var_if_set", "from var");
++
++        let env = Env::new().filter_or("env_get_filter_reads_from_var_if_set", "from default");
++
++        assert_eq!(Some("from var".to_owned()), env.get_filter());
++    }
++
++    #[test]
++    fn env_get_filter_reads_from_default_if_var_not_set() {
++        env::remove_var("env_get_filter_reads_from_default_if_var_not_set");
++
++        let env = Env::new().filter_or("env_get_filter_reads_from_default_if_var_not_set", "from default");
++
++        assert_eq!(Some("from default".to_owned()), env.get_filter());
++    }
++
++    #[test]
++    fn env_get_write_style_reads_from_var_if_set() {
++        env::set_var("env_get_write_style_reads_from_var_if_set", "from var");
++
++        let env = Env::new().write_style_or("env_get_write_style_reads_from_var_if_set", "from default");
++
++        assert_eq!(Some("from var".to_owned()), env.get_write_style());
++    }
++
++    #[test]
++    fn env_get_write_style_reads_from_default_if_var_not_set() {
++        env::remove_var("env_get_write_style_reads_from_default_if_var_not_set");
++
++        let env = Env::new().write_style_or("env_get_write_style_reads_from_default_if_var_not_set", "from default");
++
++        assert_eq!(Some("from default".to_owned()), env.get_write_style());
++    }
++}
diff --cc vendor/env_logger-0.5.12/tests/log-in-log.rs
index 000000000,000000000..6b2c47e7a
new file mode 100644
--- /dev/null
+++ b/vendor/env_logger-0.5.12/tests/log-in-log.rs
@@@ -1,0 -1,0 +1,38 @@@
++#[macro_use] extern crate log;
++extern crate env_logger;
++
++use std::process;
++use std::fmt;
++use std::env;
++use std::str;
++
++struct Foo;
++
++impl fmt::Display for Foo {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        info!("test");
++        f.write_str("bar")
++    }
++}
++
++fn main() {
++    env_logger::init();
++    if env::var("YOU_ARE_TESTING_NOW").is_ok() {
++        return info!("{}", Foo);
++    }
++
++    let exe = env::current_exe().unwrap();
++    let out = process::Command::new(exe)
++        .env("YOU_ARE_TESTING_NOW", "1")
++        .env("RUST_LOG", "debug")
++        .output()
++        .unwrap_or_else(|e| panic!("Unable to start child process: {}", e));
++    if out.status.success() {
++        return
++    }
++
++    println!("test failed: {}", out.status);
++    println!("--- stdout\n{}", str::from_utf8(&out.stdout).unwrap());
++    println!("--- stderr\n{}", str::from_utf8(&out.stderr).unwrap());
++    process::exit(1);
++}
diff --cc vendor/env_logger-0.5.12/tests/regexp_filter.rs
index 000000000,000000000..d23e9223e
new file mode 100644
--- /dev/null
+++ b/vendor/env_logger-0.5.12/tests/regexp_filter.rs
@@@ -1,0 -1,0 +1,51 @@@
++#[macro_use] extern crate log;
++extern crate env_logger;
++
++use std::process;
++use std::env;
++use std::str;
++
++fn main() {
++    if env::var("LOG_REGEXP_TEST").ok() == Some(String::from("1")) {
++        child_main();
++    } else {
++        parent_main()
++    }
++}
++
++fn child_main() {
++    env_logger::init();
++    info!("XYZ Message");
++}
++
++fn run_child(rust_log: String) -> bool {
++    let exe = env::current_exe().unwrap();
++    let out = process::Command::new(exe)
++        .env("LOG_REGEXP_TEST", "1")
++        .env("RUST_LOG", rust_log)
++        .output()
++        .unwrap_or_else(|e| panic!("Unable to start child process: {}", e));
++    str::from_utf8(out.stderr.as_ref()).unwrap().contains("XYZ Message")
++}
++
++fn assert_message_printed(rust_log: &str) {
++ if !run_child(rust_log.to_string()) { ++ panic!("RUST_LOG={} should allow the test log message", rust_log) ++ } ++} ++ ++fn assert_message_not_printed(rust_log: &str) { ++ if run_child(rust_log.to_string()) { ++ panic!("RUST_LOG={} should not allow the test log message", rust_log) ++ } ++} ++ ++fn parent_main() { ++ // test normal log severity levels ++ assert_message_printed("info"); ++ assert_message_not_printed("warn"); ++ ++ // test of regular expression filters ++ assert_message_printed("info/XYZ"); ++ assert_message_not_printed("info/XXX"); ++} diff --cc vendor/failure-0.1.2/.cargo-checksum.json index 000000000,000000000..8f8df6d14 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"7efb22686e4a466b1ec1a15c2898f91fa9cb340452496dca654032de20ff95b9"} diff --cc vendor/failure-0.1.2/.gitlab-ci.yml index 000000000,000000000..a9cbe35c3 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/.gitlab-ci.yml @@@ -1,0 -1,0 +1,10 @@@ ++image: "rust:latest" ++ ++pages: ++ script: ++ - sh ./build-docs.sh ++ artifacts: ++ paths: ++ - public ++ only: ++ - master diff --cc vendor/failure-0.1.2/.travis.yml index 000000000,000000000..61d896575 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/.travis.yml @@@ -1,0 -1,0 +1,11 @@@ ++language: rust ++rust: ++ - 1.18.0 ++ - stable ++ - beta ++ - nightly ++cache: cargo ++script: ++ - cargo test ++ - cargo test --features backtrace ++ - cargo check --no-default-features diff --cc vendor/failure-0.1.2/CODE_OF_CONDUCT.md index 000000000,000000000..a2161d0d4 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/CODE_OF_CONDUCT.md @@@ -1,0 -1,0 +1,46 @@@ ++# Contributor Covenant Code of Conduct ++ ++## Our Pledge ++ ++In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. ++ ++## Our Standards ++ ++Examples of behavior that contributes to creating a positive environment include: ++ ++* Using welcoming and inclusive language ++* Being respectful of differing viewpoints and experiences ++* Gracefully accepting constructive criticism ++* Focusing on what is best for the community ++* Showing empathy towards other community members ++ ++Examples of unacceptable behavior by participants include: ++ ++* The use of sexualized language or imagery and unwelcome sexual attention or advances ++* Trolling, insulting/derogatory comments, and personal or political attacks ++* Public or private harassment ++* Publishing others' private information, such as a physical or electronic address, without explicit permission ++* Other conduct which could reasonably be considered inappropriate in a professional setting ++ ++## Our Responsibilities ++ ++Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
++ ++Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ++ ++## Scope ++ ++This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ++ ++## Enforcement ++ ++Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at boats@mozilla.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. ++ ++Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ++ ++## Attribution ++ ++This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] ++ ++[homepage]: http://contributor-covenant.org ++[version]: http://contributor-covenant.org/version/1/4/ diff --cc vendor/failure-0.1.2/Cargo.toml index 000000000,000000000..89d0c0d61 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/Cargo.toml @@@ -1,0 -1,0 +1,33 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "failure" ++version = "0.1.2" ++authors = ["Without Boats "] ++description = "Experimental error handling abstraction." ++homepage = "https://boats.gitlab.io/failure" ++documentation = "https://docs.rs/failure" ++license = "MIT OR Apache-2.0" ++repository = "https://github.com/rust-lang-nursery/failure" ++[dependencies.backtrace] ++version = "0.3.3" ++optional = true ++ ++[dependencies.failure_derive] ++version = "0.1.2" ++optional = true ++ ++[features] ++default = ["std", "derive"] ++derive = ["failure_derive"] ++std = ["backtrace"] diff --cc vendor/failure-0.1.2/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. 
++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. ++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. 
Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. 
++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. 
diff --cc vendor/failure-0.1.2/LICENSE-MIT
index 000000000,000000000..31aa79387
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/LICENSE-MIT
@@@ -1,0 -1,0 +1,23 @@@
++Permission is hereby granted, free of charge, to any
++person obtaining a copy of this software and associated
++documentation files (the "Software"), to deal in the
++Software without restriction, including without
++limitation the rights to use, copy, modify, merge,
++publish, distribute, sublicense, and/or sell copies of
++the Software, and to permit persons to whom the Software
++is furnished to do so, subject to the following
++conditions:
++
++The above copyright notice and this permission notice
++shall be included in all copies or substantial portions
++of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
diff --cc vendor/failure-0.1.2/Makefile
index 000000000,000000000..6ff732985
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/Makefile
@@@ -1,0 -1,0 +1,15 @@@
++all: test
++.PHONY: all
++
++test:
++	@echo TEST DEFAULT FEATURES
++	@cargo test --all
++	@echo TEST WITH BACKTRACE
++	@cargo test --features backtrace --all
++	@echo TEST NO DEFAULT FEATURES
++	@cargo check --no-default-features --all
++.PHONY: test
++
++check:
++	@cargo check --all
++.PHONY: check
diff --cc vendor/failure-0.1.2/README.md
index 000000000,000000000..2f228321b
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/README.md
@@@ -1,0 -1,0 +1,119 @@@
++# failure - a new error management story
++
++[![Build Status](https://travis-ci.org/rust-lang-nursery/failure.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/failure)
++[![Latest Version](https://img.shields.io/crates/v/failure.svg)](https://crates.io/crates/failure)
++[![docs](https://docs.rs/failure/badge.svg)](https://docs.rs/failure)
++
++`failure` is designed to make it easier to manage errors in Rust. It is
++intended to replace error management based on `std::error::Error` with a new
++system based on lessons learned over the past several years, including those
++learned from experience with quick-error and error-chain.
++
++`failure` provides two core components:
++
++* `Fail`: A new trait for custom error types.
++* `Error`: A struct which any type that implements `Fail` can be cast into.
++
++## Evolution
++
++Failure is currently evolving as a library. First of all, there is work going
++on in Rust itself to [fix the error trait](https://github.com/rust-lang/rfcs/pull/2504);
++secondarily, the original plan for Failure towards 1.0 is unlikely to happen
++in its current form.
++
++As such the original master branch towards 1.0 of failure was removed, and
++master now represents the future iteration steps of 0.1 until it's clear
++what happens in the stdlib.
++
++The original 1.0 branch can be found in [evolution/1.0](https://github.com/rust-lang-nursery/failure/tree/evolution/1.0).
++
++## Example
++
++```rust
++extern crate serde;
++extern crate toml;
++
++#[macro_use] extern crate failure;
++#[macro_use] extern crate serde_derive;
++
++use std::collections::HashMap;
++use std::path::PathBuf;
++use std::str::FromStr;
++
++use failure::Error;
++
++// This is a new error type that you've created. It represents the ways a
++// toolchain could be invalid.
++//
++// The custom derive for Fail derives an impl of both Fail and Display.
++// We don't do any other magic like creating new types.
++#[derive(Debug, Fail)]
++enum ToolchainError {
++    #[fail(display = "invalid toolchain name: {}", name)]
++    InvalidToolchainName {
++        name: String,
++    },
++    #[fail(display = "unknown toolchain version: {}", version)]
++    UnknownToolchainVersion {
++        version: String,
++    }
++}
++
++pub struct ToolchainId {
++    // ... etc
++}
++
++impl FromStr for ToolchainId {
++    type Err = ToolchainError;
++
++    fn from_str(s: &str) -> Result<ToolchainId, ToolchainError> {
++        // ... etc
++    }
++}
++
++pub type Toolchains = HashMap<ToolchainId, PathBuf>;
++
++// This opens a toml file containing associations between ToolchainIds and
++// Paths (the roots of those toolchains).
++//
++// This could encounter an io Error, a toml parsing error, or a ToolchainError;
++// all of them will be thrown into the special Error type.
++pub fn read_toolchains(path: PathBuf) -> Result<Toolchains, Error>
++{
++    use std::fs::File;
++    use std::io::Read;
++
++    let mut string = String::new();
++    File::open(path)?.read_to_string(&mut string)?;
++
++    let toml: HashMap<String, PathBuf> = toml::from_str(&string)?;
++
++    let toolchains = toml.into_iter().map(|(key, path)| {
++        let toolchain_id = key.parse()?;
++        Ok((toolchain_id, path))
++    }).collect::<Result<Toolchains, ToolchainError>>()?;
++
++    Ok(toolchains)
++}
++```
++
++## Requirements
++
++Both failure and failure_derive are intended to compile on all stable versions
++of Rust newer than 1.18.0, as well as the latest beta and the latest nightly.
++If either crate fails to compile on any version newer than 1.18.0, please open
++an issue.
++
++failure is **no_std** compatible, though some aspects of it (primarily the
++`Error` type) will not be available in no_std mode.
++
++## License
++
++failure is licensed under the terms of the MIT License or the Apache License
++2.0, at your choosing.
++
++## Code of Conduct
++
++Contribution to the failure crate is organized under the terms of the
++Contributor Covenant; the maintainer of failure, @withoutboats, promises to
++intervene to uphold that code of conduct.
diff --cc vendor/failure-0.1.2/RELEASES.md
index 000000000,000000000..f4263fe31
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/RELEASES.md
@@@ -1,0 -1,0 +1,17 @@@
++# Version 0.1.1
++
++- Add a `Causes` iterator, which iterates over the causes of a failure. Can be
++  accessed through the `Fail::causes` or `Error::causes` methods.
++- Add the `bail!` macro, which "throws" from the function.
++- Add the `ensure!` macro, which is like an "assert" which throws instead of
++  panicking.
++- The derive now supports a no_std mode.
++- The derive is re-exported from `failure` by default, so that users do not
++  have to directly depend on `failure_derive`.
++- Add an impl of `From<D> for Context<D>`, allowing users to `?` the `D` type
++  to produce a `Context<D>` (for cases where there is no further underlying
++  error).
++
++# Version 0.1.0
++
++- Initial version.
diff --cc vendor/failure-0.1.2/book/src/SUMMARY.md
index 000000000,000000000..20a86d9c7
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/SUMMARY.md
@@@ -1,0 -1,0 +1,13 @@@
++# Summary
++
++- [failure](./intro.md)
++- [How to use failure](./howto.md)
++    - [The Fail trait](./fail.md)
++    - [Deriving Fail](./derive-fail.md)
++    - [The Error type](./error.md)
++    - [`bail!` and `ensure!`](./bail-and-ensure.md)
++- [Patterns & Guidance](./guidance.md)
++    - [Strings as errors](./error-msg.md)
++    - [A Custom Fail type](./custom-fail.md)
++    - [Using the Error type](./use-error.md)
++    - [An Error and ErrorKind pair](./error-errorkind.md)
diff --cc vendor/failure-0.1.2/book/src/bail-and-ensure.md
index 000000000,000000000..1326f0e05
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/bail-and-ensure.md
@@@ -1,0 -1,0 +1,18 @@@
++# `bail!` and `ensure!`
++
++If you were a fan of the `bail!` and `ensure!` macros from error-chain, good
++news: failure has a version of these macros as well.
++
++The `bail!` macro returns an error immediately, based on a format string. The
++`ensure!` macro additionally takes a conditional, and returns the error only if
++that conditional is false. You can think of `bail!` and `ensure!` as being
++analogous to `panic!` and `assert!`, but throwing errors instead of panicking.
++
++The `bail!` and `ensure!` macros are useful when you are prototyping and you
++want to write your custom errors later. It is also the simplest example of
++using the failure crate.
++
++## Example
++```rust
++#[macro_use] extern crate failure;
++
++use failure::Error;
++
++fn safe_cast_to_unsigned(n: i32) -> Result<u32, Error>
++{
++    ensure!(n >= 0, "number cannot be smaller than 0!");
++    Ok(n as u32)
++}
++```
diff --cc vendor/failure-0.1.2/book/src/custom-fail.md
index 000000000,000000000..324c0417b
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/custom-fail.md
@@@ -1,0 -1,0 +1,75 @@@
++# A Custom Fail type
++
++This pattern is a way to define a new kind of failure. Defining a new kind of
++failure can be an effective way of representing an error for which you control
++all of the possible failure cases. It has several advantages:
++
++1. You can enumerate exactly all of the possible failures that can occur in
++this context.
++2. You have total control over the representation of the failure type.
++3. Callers can destructure your error without any sort of downcasting.
++
++To implement this pattern, you should define your own type that implements
++`Fail`. You can use the [custom derive][derive-fail] to make this easier. For
++example:
++
++```rust
++#[derive(Fail, Debug)]
++#[fail(display = "Input was invalid UTF-8")]
++pub struct Utf8Error;
++```
++
++This type can become as large and complicated as is appropriate to your use
++case. It can be an enum with a different variant for each possible error, and
++it can carry data with more precise information about the error. For example:
++
++```rust
++#[derive(Fail, Debug)]
++#[fail(display = "Input was invalid UTF-8 at index {}", index)]
++pub struct Utf8Error {
++    index: usize,
++}
++```
++
++## When might you use this pattern?
++
++If you need to raise an error that doesn't come from one of your dependencies,
++this is a great pattern to use.
++
++You can also use this pattern in conjunction with [using `Error`][use-error] or
++defining an [Error and ErrorKind pair][error-errorkind].
Those functions which ++are "pure logic" and have a very constrained set of errors (such as parsing ++simple formats) might each return a different custom Fail type, and then the ++function which merges them all together, does IO, and so on, would return a ++more complex type like `Error` or your custom Error/ErrorKind. ++ ++## Caveats on this pattern ++ ++When you have a dependency which returns a different error type, often you will ++be inclined to add it as a variant on your own error type. When you do that, ++you should tag the underlying error as the `#[fail(cause)]` of your error: ++ ++```rust ++#[derive(Fail, Debug)] ++pub enum MyError { ++ #[fail(display = "Input was invalid UTF-8 at index {}", _0)] ++ Utf8Error(usize), ++ #[fail(display = "{}", _0)] ++ Io(#[fail(cause)] io::Error), ++} ++``` ++ ++Up to a limit, this design can work. However, it has some problems: ++ ++- It can be hard to be forward compatible with new dependencies that raise ++ their own kinds of errors in the future. ++- It defines a 1-1 relationship between a variant of the error and an ++ underlying error. ++ ++Depending on your use case, as your function grows in complexity, it can be ++better to transition to [using Error][use-error] or [defining an Error & ++ErrorKind pair][error-errorkind]. ++ ++[derive-fail]: ./derive-fail.html ++[use-error]: ./use-error.html ++[error-errorkind]: ./error-errorkind.html diff --cc vendor/failure-0.1.2/book/src/derive-fail.md index 000000000,000000000..6fffd9918 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/book/src/derive-fail.md @@@ -1,0 -1,0 +1,177 @@@ ++# Deriving `Fail` ++ ++Though you can implement `Fail` yourself, we also provide a derive macro to ++generate the impl for you. To get access to this macro, you must tag the extern ++crate declaration with `#[macro_use]`, as in: ++ ++```rust ++#[macro_use] extern crate failure; ++``` ++ ++In its smallest form, deriving Fail looks like this: ++ ++```rust ++#[macro_use] extern crate failure; ++ ++use std::fmt; ++ ++#[derive(Fail, Debug)] ++struct MyError; ++ ++impl fmt::Display for MyError { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ write!(f, "An error occurred.") ++ } ++} ++``` ++ ++All failures need to implement `Display`, so we have added an impl of ++Display. However, implementing `Display` is much more boilerplate than ++implementing `Fail` - this is why we support deriving `Display` for you. ++ ++## Deriving `Display` ++ ++You can derive an implementation of `Display` with a special attribute: ++ ++```rust ++#[macro_use] extern crate failure; ++ ++#[derive(Fail, Debug)] ++#[fail(display = "An error occurred.")] ++struct MyError; ++``` ++ ++This attribute will cause the `Fail` derive to also generate an impl of ++`Display`, so that you don't have to implement one yourself. ++ ++### String interpolation ++ ++String literals are not enough for error messages in many cases. Often, you ++want to include parts of the error value interpolated into the message. You can ++do this with failure using the same string interpolation syntax as Rust's ++formatting and printing macros: ++ ++```rust ++#[macro_use] extern crate failure; ++ ++#[derive(Fail, Debug)] ++#[fail(display = "An error occurred with error code {}. ({})", code, message)] ++struct MyError { ++ code: i32, ++ message: String, ++} ++``` ++ ++Note that unlike code that would appear in a method, this does not use ++something like `self.code` or `self.message`; it just uses the field names ++directly. 
This is because of a limitation in Rust's current attribute syntax. ++As a result, you can only interpolate fields through the derivation; you cannot ++perform method calls or use other arbitrary expressions. ++ ++### Tuple structs ++ ++With regular structs, you can use the name of the field in string ++interpolation. When deriving Fail for a tuple struct, you might expect to use ++the numeric index to refer to fields `0`, `1`, et cetera. However, a compiler ++limitation prevents this from parsing today. ++ ++For the time being, tuple field accesses in the display attribute need to be ++prefixed with an underscore: ++ ++```rust ++#[macro_use] extern crate failure; ++ ++#[derive(Fail, Debug)] ++#[fail(display = "An error occurred with error code {}.", _0)] ++struct MyError(i32); ++ ++ ++#[derive(Fail, Debug)] ++#[fail(display = "An error occurred with error code {} ({}).", _0, _1)] ++struct MyOtherError(i32, String); ++``` ++ ++### Enums ++ ++Implementing Display is also supported for enums by applying the attribute to ++each variant of the enum, rather than to the enum as a whole. The Display impl ++will match over the enum to generate the correct error message. For example: ++ ++```rust ++#[macro_use] extern crate failure; ++ ++#[derive(Fail, Debug)] ++enum MyError { ++ #[fail(display = "{} is not a valid version.", _0)] ++ InvalidVersion(u32), ++ #[fail(display = "IO error: {}", error)] ++ IoError { error: io::Error }, ++ #[fail(display = "An unknown error has occurred.")] ++ UnknownError, ++} ++``` ++ ++## Overriding `backtrace` ++ ++The backtrace method will be automatically overridden if the type contains a ++field with the type `Backtrace`. This works for both structs and enums. ++ ++```rust ++#[macro_use] extern crate failure; ++ ++use failure::Backtrace; ++ ++/// MyError::backtrace will return a reference to the backtrace field ++#[derive(Fail, Debug)] ++#[fail(display = "An error occurred.")] ++struct MyError { ++ backtrace: Backtrace, ++} ++ ++/// MyEnumError::backtrace will return a reference to the backtrace only if it ++/// is Variant2, otherwise it will return None. ++#[derive(Fail, Debug)] ++enum MyEnumError { ++ #[fail(display = "An error occurred.")] ++ Variant1, ++ #[fail(display = "A different error occurred.")] ++ Variant2(Backtrace), ++} ++``` ++ ++This happens automatically; no other annotations are necessary. It only works ++if the type is named Backtrace, and not if you have created an alias for the ++Backtrace type. ++ ++## Overriding `cause` ++ ++In contrast to `backtrace`, the cause cannot be determined by type name alone ++because it could be any type which implements `Fail`. For this reason, if your ++error has an underlying cause field, you need to annotate that field with ++the `#[fail(cause)]` attribute. ++ ++This can be used in fields of enums as well as structs. ++ ++ ++```rust ++#[macro_use] extern crate failure; ++ ++use std::io; ++ ++/// MyError::cause will return a reference to the io_error field ++#[derive(Fail, Debug)] ++#[fail(display = "An error occurred.")] ++struct MyError { ++ #[fail(cause)] io_error: io::Error, ++} ++ ++/// MyEnumError::cause will return a reference only if it is Variant2, ++/// otherwise it will return None. 
++#[derive(Fail, Debug)]
++enum MyEnumError {
++    #[fail(display = "An error occurred.")]
++    Variant1,
++    #[fail(display = "A different error occurred.")]
++    Variant2(#[fail(cause)] io::Error),
++}
++```
diff --cc vendor/failure-0.1.2/book/src/error-errorkind.md
index 000000000,000000000..c5968e813
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/error-errorkind.md
@@@ -1,0 -1,0 +1,143 @@@
++# An Error and ErrorKind pair
++
++This pattern is the most robust way to manage errors - and also the most high
++maintenance. It combines some of the advantages of the [using Error][use-error]
++pattern and the [custom failure][custom-fail] patterns, while avoiding some of
++the disadvantages each of those patterns has:
++
++1. Like `Error`, this is forward compatible with new underlying kinds of
++errors from your dependencies.
++2. Like custom failures, this pattern allows you to specify additional
++information about the error that your dependencies don't give you.
++3. Like `Error`, it can be easier to convert underlying errors from
++dependencies into this type than for custom failures.
++4. Like custom failures, users can gain some information about the error
++without downcasting.
++
++The pattern is to create two new failure types: an `Error` and an `ErrorKind`,
++and to leverage [the `Context` type][context-api] provided by failure.
++
++```rust
++#[derive(Debug)]
++struct MyError {
++    inner: Context<MyErrorKind>,
++}
++
++#[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)]
++enum MyErrorKind {
++    // A plain enum with no data in any of its variants
++    //
++    // For example:
++    #[fail(display = "A contextual error message.")]
++    OneVariant,
++    // ...
++}
++```
++
++Unfortunately, it is not easy to correctly derive `Fail` for `MyError` so that
++it delegates things to its inner `Context`. You should write those impls
++yourself:
++
++```rust
++impl Fail for MyError {
++    fn cause(&self) -> Option<&Fail> {
++        self.inner.cause()
++    }
++
++    fn backtrace(&self) -> Option<&Backtrace> {
++        self.inner.backtrace()
++    }
++}
++
++impl Display for MyError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        Display::fmt(&self.inner, f)
++    }
++}
++```
++
++You should also provide some conversions and accessors, to go between a
++Context, your ErrorKind, and your Error:
++
++```rust
++impl MyError {
++    pub fn kind(&self) -> MyErrorKind {
++        *self.inner.get_context()
++    }
++}
++
++impl From<MyErrorKind> for MyError {
++    fn from(kind: MyErrorKind) -> MyError {
++        MyError { inner: Context::new(kind) }
++    }
++}
++
++impl From<Context<MyErrorKind>> for MyError {
++    fn from(inner: Context<MyErrorKind>) -> MyError {
++        MyError { inner: inner }
++    }
++}
++```
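++
++Given these conversions, callers can branch on the error's kind without any
++downcasting. A sketch reusing the `MyError`/`MyErrorKind` pair above (`handle`
++is an illustrative name, not part of failure):
++
++```rust
++fn handle(err: &MyError) {
++    match err.kind() {
++        MyErrorKind::OneVariant => {
++            // react to this specific kind of failure
++        }
++        // ... one arm per kind
++    }
++}
++```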
++
++With this code set up, you can use the context method from failure to apply
++your ErrorKind to `Result`s in underlying libraries:
++
++```rust
++use failure::ResultExt;
++perform_some_io().context(ErrorKind::NetworkFailure)?;
++```
++
++You can also directly throw `ErrorKind` without an underlying error when
++appropriate:
++
++```rust
++Err(ErrorKind::DomainSpecificError)?
++```
++
++### What should your ErrorKind contain?
++
++Your error kind probably should not carry data - and if it does, it should only
++carry stateless data types that provide additional information about what the
++`ErrorKind` means. This way, your `ErrorKind` can be `Eq`, making it easy to
++use as a way of comparing errors.
++
++Your ErrorKind is a way of providing information about what errors mean
++appropriate to the level of abstraction that your library operates at. As some
++examples:
++
++- If your library expects to read from the user's `Cargo.toml`, you might have
++  an `InvalidCargoToml` variant, to capture what `io::Error` and `toml::Error`
++  mean in the context of your library.
++- If your library does both file system activity and network activity, you
++  might have `Filesystem` and `Network` variants, to divide up the `io::Error`s
++  between which system in particular failed.
++
++Exactly what semantic information is appropriate depends entirely on what this
++bit of code is intended to do.
++
++## When might you use this pattern?
++
++The most likely use cases for this pattern are mid-layer libraries which
++perform a function that requires many dependencies, and that are intended to be
++used in production. Libraries with few dependencies do not need to manage many
++underlying error types and can probably suffice with a simpler [custom
++failure][custom-fail]. Applications that know they are almost always just going
++to log these errors can get away with [using the Error type][use-error] rather
++than managing extra context information.
++
++That said, when you need to provide the most expressive information about an
++error possible, this can be a good approach.
++
++## Caveats on this pattern
++
++This pattern is the most involved pattern documented in this book. It involves
++a lot of boilerplate to set up (which may be automated away eventually), and it
++requires you to apply a contextual message to every underlying error that is
++thrown inside your code. It can be a lot of work to maintain this pattern.
++
++Additionally, like the Error type, the Context type may use an allocation and a
++dynamic dispatch internally. If you know this is too expensive for your use
++case, you should not use this pattern.
++
++[use-error]: ./use-error.html
++[custom-fail]: ./custom-fail.html
++[context-api]: https://boats.gitlab.io/failure/doc/failure/struct.Context.html
diff --cc vendor/failure-0.1.2/book/src/error-msg.md
index 000000000,000000000..e754aa93b
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/error-msg.md
@@@ -1,0 -1,0 +1,59 @@@
++# Strings as errors
++
++This pattern is a way to create new errors without doing much set up. It is
++definitely the sloppiest way to throw errors. It can be great to use this
++during prototyping, but maybe not in the final product.
++
++String types do not implement `Fail`, which is why there are two adapters to
++create failures from a string:
++
++- [`failure::err_msg`][err-msg-api] - a function that takes a displayable
++  type and creates a failure from it. This can take a String or a string
++  literal.
++- [`format_err!`][format-err-api] - a macro with string interpolation, similar
++  to `format!` or `println!`.
++
++```rust
++fn check_range(x: usize, range: Range<usize>) -> Result<usize, Error> {
++    if x < range.start {
++        return Err(format_err!("{} is below {}", x, range.start));
++    }
++    if x >= range.end {
++        return Err(format_err!("{} is above {}", x, range.end));
++    }
++    Ok(x)
++}
++```
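++
++The `format_err!` form above covers interpolated messages; for a fixed message,
++`err_msg` is enough. A minimal sketch (`load_config` is an illustrative name,
++not part of failure):
++
++```rust
++use failure::{err_msg, Error};
++
++fn load_config() -> Result<(), Error> {
++    // err_msg turns any displayable value into a failure
++    Err(err_msg("config file is missing"))
++}
++```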
++
++If you're going to use strings as errors, we recommend [using
++`Error`][use-error] as your error type, rather than `ErrorMessage`; this way,
++if some of your strings are `String` and some are `&'static str`, you don't
++need to worry about merging them into a single string type.
++
++## When might you use this pattern?
++
++This pattern is the easiest to set up and get going with, so it can be great
++when prototyping or spiking out an early design. It can also be great when you
++know that an error variant is extremely uncommon, and that there is really no
++way to handle it other than to log the error and move on.
++
++## Caveats on this pattern
++
++If you are writing a library you plan to publish to crates.io, this is probably
++not a good way to handle errors, because it doesn't give your clients very much
++control. For public, open source libraries, we'd recommend using [custom
++failures][custom-fail] in the cases where you would use a string as an error.
++
++This pattern can also be very brittle. If you ever want to branch over which
++error was returned, you would have to match on the exact contents of the
++string. If you ever change the string contents, that will silently break that
++match.
++
++For these reasons, we strongly recommend against using this pattern except for
++prototyping and when you know the error is just going to get logged or reported
++to the users.
++
++[custom-fail]: ./custom-fail.html
++[use-error]: ./use-error.html
++[err-msg-api]: https://boats.gitlab.io/failure/doc/failure/fn.err_msg.html
++[format-err-api]: https://boats.gitlab.io/failure/doc/failure/macro.format_err.html
diff --cc vendor/failure-0.1.2/book/src/error.md
index 000000000,000000000..f37e4c3fc
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/error.md
@@@ -1,0 -1,0 +1,100 @@@
++# The `Error` type
++
++In addition to the trait `Fail`, failure provides a type called `Error`. Any
++type that implements `Fail` can be cast into `Error` using From and Into,
++which allows users to throw errors of different types with `?` when the
++function returns an `Error`.
++
++For example:
++
++```rust
++// Something you can deserialize
++#[derive(Deserialize)]
++struct Object {
++    ...
++}
++
++impl Object {
++    // This throws both IO Errors and JSON Errors, but they both get converted
++    // into the Error type.
++    fn from_file(path: &Path) -> Result<Object, Error> {
++        let mut string = String::new();
++        File::open(path)?.read_to_string(&mut string)?;
++        let object = json::from_str(&string)?;
++        Ok(object)
++    }
++}
++```
++
++## Causes and Backtraces
++
++The Error type has all of the methods from the Fail trait, with a few notable
++differences. Most importantly, the cause and backtrace methods on Error do not
++return Options - an Error is *guaranteed* to have a cause and a backtrace.
++
++```rust
++// Both methods are guaranteed to return an &Fail and an &Backtrace
++println!("{}, {}", error.cause(), error.backtrace())
++```
++
++An `Error`'s cause is always the failure that was cast into this `Error`.
++That failure may have further underlying causes. Unlike Fail, this means that
++the cause of an Error will have the same Display representation as the Error
++itself.
++
++As to the error's guaranteed backtrace, when the conversion into the Error type
++happens, if the underlying failure does not provide a backtrace, a new
++backtrace is constructed pointing to that conversion point (rather than the
++origin of the error). This construction only happens if there is no underlying
++backtrace; if it does have a backtrace no new backtrace is constructed.
++
++## Downcasting
++
++The Error type also supports downcasting into any concrete Fail type. It can be
++downcast by reference or by value - when downcasting by value, the return type
++is `Result<T, Error>`, allowing you to get the error back out of it.
++
++```rust
++match error.downcast::<io::Error>() {
++    Ok(io_error) => { ... }
++    Err(error) => { ... }
++}
++```
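++
++Downcasting by reference works the same way but leaves the error intact. A
++sketch, assuming an `error: &Error` is in scope:
++
++```rust
++if let Some(io_error) = error.downcast_ref::<io::Error>() {
++    // inspect the concrete io::Error without consuming the Error
++}
++```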
++
++## Implementation details
++
++`Error` is essentially a trait object, but with some fanciness it may generate
++and store the backtrace if the underlying failure did not have one. In
++particular, we use a custom dynamically sized type to store the backtrace
++information inline with the trait object data.
++
++```rust
++struct Error {
++    // Inner is a dynamically sized type
++    inner: Box<Inner<Fail>>,
++}
++
++struct Inner<F: Fail + ?Sized> {
++    backtrace: Backtrace,
++    failure: F,
++}
++```
++
++By storing the backtrace in the heap this way, we avoid increasing the size of
++the Error type beyond that of two non-nullable pointers. This keeps the size of
++the `Result` type from getting too large, avoiding having a negative impact on
++the "happy path" of returning Ok. For example, a `Result<(), Error>` should be
++represented as a pair of nullable pointers, with the null case representing
++`Ok`. Similar optimizations can be applied to values up to at least a pointer
++in size.
++
++To emphasize: Error is intended for use cases where the error case is
++considered relatively uncommon. This optimization makes the overhead of an
++error less than it otherwise would be for the Ok branch. In cases where errors
++are going to be returned extremely frequently, returning this Error type is
++probably not appropriate, but you should benchmark in those cases.
++
++(As a rule of thumb: if you're not sure if you can afford to have a trait
++object, you probably *can* afford it. Heap allocations are not nearly as cheap
++as stack allocations, but they're cheap enough that you can almost always
++afford them.)
diff --cc vendor/failure-0.1.2/book/src/fail.md
index 000000000,000000000..566400551
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/fail.md
@@@ -1,0 -1,0 +1,152 @@@
++# The `Fail` trait
++
++The `Fail` trait is a replacement for [`std::error::Error`][stderror]. It has
++been designed to support a number of operations:
++
++- Because it is bound by both `Debug` and `Display`, any failure can be
++  printed in two ways.
++- It has both a `backtrace` and a `cause` method, allowing users to get
++  information about how the error occurred.
++- It supports wrapping failures in additional contextual information.
++- Because it is bound by `Send` and `Sync`, failures can be moved and shared
++  between threads easily.
++- Because it is bound by `'static`, the abstract `Fail` trait object can be
++  downcast into concrete types.
++
++Every new error type in your code should implement `Fail`, so it can be
++integrated into the entire system built around this trait. You can manually
++implement `Fail` yourself, or you can use the derive for `Fail` defined
++in a separate crate and documented [here][derive-docs].
++
++Implementors of this trait are called 'failures'.
++
++## Cause
++
++Often, an error type contains (or could contain) another underlying error type
++which represents the "cause" of this error - for example, if your custom error
++contains an `io::Error`, that is the cause of your error.
++
++The cause method on the `Fail` trait allows all errors to expose their
++underlying cause - if they have one - in a consistent way.
++Users can loop over the chain of causes, for example, getting the entire
++series of causes for an error:
++
++```rust
++// Assume err is a type that implements `Fail`
++let mut fail: &Fail = err;
++
++while let Some(cause) = fail.cause() {
++    println!("{}", cause);
++
++    // Make `fail` the reference to the cause of the previous fail, making the
++    // loop "dig deeper" into the cause chain.
++    fail = cause;
++}
++```
++
++Because `&Fail` supports downcasting, you can also inspect causes in more
++detail if you are expecting a certain failure:
++
++```rust
++while let Some(cause) = fail.cause() {
++
++    if let Some(err) = cause.downcast_ref::<io::Error>() {
++        // treat io::Error specially
++    } else {
++        // fallback case
++    }
++
++    fail = cause;
++}
++```
++
++For convenience an iterator is also provided:
++
++```rust
++// Assume err is a type that implements `Fail`
++let mut fail: &Fail = err;
++
++for cause in fail.iter_causes() {
++    println!("{}", cause);
++}
++```
++
++## Backtraces
++
++Errors can also generate a backtrace when they are constructed, helping you
++determine the place the error was generated and the function chain that led to
++it. Like causes, this is entirely optional - the authors of each failure
++have to decide if generating a backtrace is appropriate in their use case.
++
++The backtrace method allows all errors to expose their backtrace if they have
++one. This enables a consistent method for getting the backtrace from an error:
++
++```rust
++// We don't even know the type of the cause, but we can still get its
++// backtrace.
++if let Some(bt) = err.cause().and_then(|cause| cause.backtrace()) {
++    println!("{}", bt)
++}
++```
++
++The `Backtrace` type exposed by `failure` is different from the `Backtrace`
++exposed by the [backtrace crate][backtrace-crate], in that it has several
++optimizations:
++
++- It has a `no_std` compatible form which will never be generated (because
++  backtraces require heap allocation), and should be entirely compiled out.
++- It will not be generated unless the `RUST_BACKTRACE` environment variable has
++  been set at runtime.
++- Symbol resolution is delayed until the backtrace is actually printed, because
++  this is the most expensive part of generating a backtrace.
++
++## Context
++
++Often, the libraries you are using will present error messages that don't
++provide very helpful information about what exactly has gone wrong. For
++example, if an `io::Error` says that an entity was "Not Found," that doesn't
++communicate much about what specific file was missing - if it even was a file
++(as opposed to a directory for example).
++
++You can inject additional context to be carried with this error value,
++providing semantic information about the nature of the error appropriate to the
++level of abstraction that the code you are writing operates at. The `context`
++method on `Fail` takes any displayable value (such as a string) to act as
++context for this error.
++
++Using the `ResultExt` trait, you can also get `context` as a convenient method
++on `Result` directly. For example, suppose that your code attempted to read
++from a Cargo.toml.
++You can wrap the `io::Error`s that occur with additional context about what
++operation has failed:
++
++```rust
++use failure::ResultExt;
++
++let mut file = File::open(cargo_toml_path).context("Missing Cargo.toml")?;
++file.read_to_end(&mut buffer).context("Could not read Cargo.toml")?;
++```
++
++The `Context` object also has a constructor that does not take an underlying
++error, allowing you to create ad hoc Context errors alongside those created by
++applying the `context` method to an underlying error.
++
++## Backwards compatibility
++
++We've taken several steps to make transitioning from `std::error` to `failure`
++as painless as possible.
++
++First, there is a blanket implementation of `Fail` for all types that implement
++`std::error::Error`, as long as they are `Send + Sync + 'static`. If you are
++dealing with a library that hasn't shifted to `Fail`, it is automatically
++compatible with `failure` already.
++
++Second, `Fail` contains a method called `compat`, which produces a type that
++implements `std::error::Error`. If you have a type that implements `Fail`, but
++not the older `Error` trait, you can call `compat` to get a type that does
++implement that trait (for example, if you need to return a `Box<Error>`).
++
++The biggest hole in our backwards compatibility story is that you cannot
++implement `std::error::Error` and also override the backtrace and cause methods
++on `Fail`. We intend to enable this with specialization when it becomes stable.
++
++[derive-docs]: https://boats.gitlab.io/failure/derive-fail.html
++[stderror]: https://doc.rust-lang.org/std/error/trait.Error.html
++[backtrace-crate]: http://alexcrichton.com/backtrace-rs
diff --cc vendor/failure-0.1.2/book/src/guidance.md
index 000000000,000000000..7023ca40d
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/guidance.md
@@@ -1,0 -1,0 +1,24 @@@
++# Patterns & Guidance
++
++failure is not a "one size fits all" approach to error management. There are
++multiple patterns that emerge from the API this library provides, and users
++need to determine which pattern makes sense for them. This section documents
++some patterns and how users might use them.
++
++In brief, these are the patterns documented here:
++
++- **[Strings as errors](./error-msg.md):** Using strings as your error
++  type. Good for prototyping.
++- **[A Custom Fail type](./custom-fail.md):** Defining a custom type to be
++  your error type. Good for APIs where you control all or most of the
++  possible failures.
++- **[Using the Error type](./use-error.md):** Using the Error type to pull
++  together multiple failures of different types. Good for applications and
++  APIs that know the error won't be inspected much more.
++- **[An Error and ErrorKind pair](./error-errorkind.md):** Using both a
++  custom error type and an ErrorKind enum to create a very robust error
++  type. Good for public APIs in large crates.
++
++(Though each of these items identifies a use case which this pattern would be
++good for, in truth each of them can be applied in various contexts. It's up to
++you to decide what makes the most sense for your particular use case.)
diff --cc vendor/failure-0.1.2/book/src/howto.md
index 000000000,000000000..5c8135b7a
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/howto.md
@@@ -1,0 -1,0 +1,8 @@@
++# How to use failure
++
++This section of the documentation is about how the APIs exposed in failure can
++be used.
++
++- **[The Fail trait](./fail.md):** The primary abstraction provided by failure.
++- **[Deriving Fail](./derive-fail.md):** A custom derive for the Fail trait.
++- **[The Error type](./error.md):** A convenient wrapper around any Fail type.
diff --cc vendor/failure-0.1.2/book/src/intro.md
index 000000000,000000000..d62093b11
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/intro.md
@@@ -1,0 -1,0 +1,77 @@@
++# failure
++
++This is the documentation for the failure crate, which provides a system for
++creating and managing errors in Rust. Additional documentation is found here:
++
++* [API documentation][api]
++* [failure source code][repo]
++
++[api]: https://boats.gitlab.io/failure/doc/failure
++[repo]: https://github.com/rust-lang-nursery/failure
++
++```rust
++extern crate serde;
++extern crate toml;
++
++#[macro_use] extern crate failure;
++#[macro_use] extern crate serde_derive;
++
++use std::collections::HashMap;
++use std::path::PathBuf;
++use std::str::FromStr;
++
++use failure::Error;
++
++// This is a new error type that you've created. It represents the ways a
++// toolchain could be invalid.
++//
++// The custom derive for Fail derives an impl of both Fail and Display.
++// We don't do any other magic like creating new types.
++#[derive(Debug, Fail)]
++enum ToolchainError {
++    #[fail(display = "invalid toolchain name: {}", name)]
++    InvalidToolchainName {
++        name: String,
++    },
++    #[fail(display = "unknown toolchain version: {}", version)]
++    UnknownToolchainVersion {
++        version: String,
++    }
++}
++
++pub struct ToolchainId {
++    // ... etc
++}
++
++impl FromStr for ToolchainId {
++    type Err = ToolchainError;
++
++    fn from_str(s: &str) -> Result<ToolchainId, ToolchainError> {
++        // ... etc
++    }
++}
++
++pub type Toolchains = HashMap<ToolchainId, PathBuf>;
++
++// This opens a toml file containing associations between ToolchainIds and
++// Paths (the roots of those toolchains).
++//
++// This could encounter an io Error, a toml parsing error, or a ToolchainError,
++// all of which will be thrown into the special Error type.
++pub fn read_toolchains(path: PathBuf) -> Result<Toolchains, Error>
++{
++    use std::fs::File;
++    use std::io::Read;
++
++    let mut string = String::new();
++    File::open(path)?.read_to_string(&mut string)?;
++
++    let toml: HashMap<String, PathBuf> = toml::from_str(&string)?;
++
++    let toolchains = toml.iter().map(|(key, path)| {
++        let toolchain_id = key.parse()?;
++        Ok((toolchain_id, path))
++    }).collect::<Result<Toolchains, ToolchainError>>()?;
++
++    Ok(toolchains)
++}
++```
diff --cc vendor/failure-0.1.2/book/src/use-error.md
index 000000000,000000000..a0a294402
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/book/src/use-error.md
@@@ -1,0 -1,0 +1,66 @@@
++# Use the `Error` type
++
++This pattern is a way to manage errors when you have multiple kinds of failure
++that could occur during a single function. It has several distinct advantages:
++
++1. You can start using it without defining any of your own failure types.
++2. All types that implement `Fail` can be thrown into the `Error` type using
++the `?` operator.
++3. As you start adding new dependencies with their own failure types, you can
++start throwing them without making a breaking change (see the sketch below).
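++
++As a quick sketch of advantages 2 and 3 (the function and file layout here
++are hypothetical, not taken from the book): two unrelated failure types,
++`io::Error` from the standard library and `ParseIntError` from string
++parsing, are both converted into `Error` by the `?` operator:
++
++```rust
++use std::fs::File;
++use std::io::Read;
++
++use failure::Error;
++
++// `File::open`/`read_to_string` fail with `io::Error`, while `parse` fails
++// with `ParseIntError`; `?` converts both into `Error` automatically.
++fn read_port(path: &str) -> Result<u16, Error> {
++    let mut raw = String::new();
++    File::open(path)?.read_to_string(&mut raw)?;
++    let port: u16 = raw.trim().parse()?;
++    Ok(port)
++}
++```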
++ ++To use this pattern, all you need to do is return `Result<_, Error>` from your ++functions: ++ ++```rust ++use std::io; ++use std::io::BufRead; ++ ++use failure::Error; ++use failure::err_msg; ++ ++fn my_function() -> Result<(), Error> { ++ let stdin = io::stdin(); ++ ++ for line in stdin.lock().lines() { ++ let line = line?; ++ ++ if line.chars().all(|c| c.is_whitespace()) { ++ break ++ } ++ ++ if !line.starts_with("$") { ++ return Err(format_err!("Input did not begin with `$`")); ++ } ++ ++ println!("{}", &line[1..]); ++ } ++ ++ Ok(()) ++} ++``` ++ ++## When might you use this pattern? ++ ++This pattern is very effective when you know you will usually not need to ++destructure the error this function returns. For example: ++ ++- When prototyping. ++- When you know you are going to log this error, or display it to the user, ++ either all of the time or nearly all of the time. ++- When it would be impractical for this API to report more custom context for ++ the error (e.g. because it is a trait that doesn't want to add a new Error ++ associated type). ++ ++## Caveats on this pattern ++ ++There are two primary downsides to this pattern: ++ ++- The `Error` type allocates. There are cases where this would be too ++ expensive. In those cases you should use a [custom failure][custom-fail]. ++- You cannot recover more information about this error without downcasting. If ++ your API needs to express more contextual information about the error, use ++ the [Error and ErrorKind][error-errorkind] pattern. ++ ++[custom-fail]: ./custom-fail.html ++[error-errorkind]: ./error-errorkind.html diff --cc vendor/failure-0.1.2/build-docs.sh index 000000000,000000000..fe2039fbf new file mode 100755 --- /dev/null +++ b/vendor/failure-0.1.2/build-docs.sh @@@ -1,0 -1,0 +1,8 @@@ ++#!/bin/bash ++mkdir public ++cargo doc --no-deps ++cargo install mdbook --no-default-features ++mdbook build ./book ++cp -r ./target/doc/ ./public ++cp -r ./book/book/* ./public ++find $PWD/public | grep "\.html\$" diff --cc vendor/failure-0.1.2/examples/bail_ensure.rs index 000000000,000000000..05c399b5d new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/examples/bail_ensure.rs @@@ -1,0 -1,0 +1,26 @@@ ++#[macro_use] ++extern crate failure; ++ ++use failure::Error; ++ ++fn bailer() -> Result<(), Error> { ++ // bail!("ruh roh"); ++ bail!("ruh {}", "roh"); ++} ++ ++fn ensures() -> Result<(), Error> { ++ ensure!(true, "true is false"); ++ ensure!(false, "false is false"); ++ Ok(()) ++} ++ ++fn main() { ++ match bailer() { ++ Ok(_) => println!("ok"), ++ Err(e) => println!("{}", e), ++ } ++ match ensures() { ++ Ok(_) => println!("ok"), ++ Err(e) => println!("{}", e), ++ } ++} diff --cc vendor/failure-0.1.2/examples/simple.rs index 000000000,000000000..35d25e16a new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/examples/simple.rs @@@ -1,0 -1,0 +1,22 @@@ ++#[macro_use] ++extern crate failure; ++ ++use failure::Fail; ++ ++#[derive(Debug, Fail)] ++#[fail(display = "my error")] ++struct MyError; ++ ++#[derive(Debug, Fail)] ++#[fail(display = "my wrapping error")] ++struct WrappingError(#[fail(cause)] MyError); ++ ++fn bad_function() -> Result<(), WrappingError> { ++ Err(WrappingError(MyError)) ++} ++ ++fn main() { ++ for cause in Fail::iter_causes(&bad_function().unwrap_err()) { ++ println!("{}", cause); ++ } ++} diff --cc vendor/failure-0.1.2/src/backtrace/internal.rs index 000000000,000000000..7be137b75 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/backtrace/internal.rs @@@ -1,0 -1,0 +1,130 @@@ 
++use std::cell::UnsafeCell;
++use std::env;
++use std::ffi::OsString;
++use std::fmt;
++use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
++use std::sync::Mutex;
++
++pub use super::backtrace::Backtrace;
++
++const GENERAL_BACKTRACE: &str = "RUST_BACKTRACE";
++const FAILURE_BACKTRACE: &str = "RUST_FAILURE_BACKTRACE";
++
++pub(super) struct InternalBacktrace {
++    backtrace: Option<MaybeResolved>,
++}
++
++struct MaybeResolved {
++    resolved: Mutex<bool>,
++    backtrace: UnsafeCell<Backtrace>,
++}
++
++unsafe impl Send for MaybeResolved {}
++unsafe impl Sync for MaybeResolved {}
++
++impl InternalBacktrace {
++    pub(super) fn new() -> InternalBacktrace {
++        static ENABLED: AtomicUsize = ATOMIC_USIZE_INIT;
++
++        match ENABLED.load(Ordering::SeqCst) {
++            0 => {
++                let enabled = is_backtrace_enabled(|var| env::var_os(var));
++                ENABLED.store(enabled as usize + 1, Ordering::SeqCst);
++                if !enabled {
++                    return InternalBacktrace { backtrace: None }
++                }
++            }
++            1 => return InternalBacktrace { backtrace: None },
++            _ => {}
++        }
++
++        InternalBacktrace {
++            backtrace: Some(MaybeResolved {
++                resolved: Mutex::new(false),
++                backtrace: UnsafeCell::new(Backtrace::new_unresolved()),
++            }),
++        }
++    }
++
++    pub(super) fn none() -> InternalBacktrace {
++        InternalBacktrace { backtrace: None }
++    }
++
++    pub(super) fn as_backtrace(&self) -> Option<&Backtrace> {
++        let bt = match self.backtrace {
++            Some(ref bt) => bt,
++            None => return None,
++        };
++        let mut resolved = bt.resolved.lock().unwrap();
++        unsafe {
++            if !*resolved {
++                (*bt.backtrace.get()).resolve();
++                *resolved = true;
++            }
++            Some(&*bt.backtrace.get())
++        }
++    }
++
++    pub(super) fn is_none(&self) -> bool {
++        self.backtrace.is_none()
++    }
++}
++
++impl fmt::Debug for InternalBacktrace {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_struct("InternalBacktrace")
++            .field("backtrace", &self.as_backtrace())
++            .finish()
++    }
++}
++
++fn is_backtrace_enabled<F: Fn(&str) -> Option<OsString>>(get_var: F) -> bool {
++    match get_var(FAILURE_BACKTRACE) {
++        Some(ref val) if val != "0" => true,
++        Some(ref val) if val == "0" => false,
++        _ => match get_var(GENERAL_BACKTRACE) {
++            Some(ref val) if val != "0" => true,
++            _ => false,
++        }
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use super::*;
++
++    const YEA: Option<&str> = Some("1");
++    const NAY: Option<&str> = Some("0");
++    const NOT_SET: Option<&str> = None;
++
++    macro_rules!
test_enabled { ++ (failure: $failure:ident, general: $general:ident => $result:expr) => {{ ++ assert_eq!(is_backtrace_enabled(|var| match var { ++ FAILURE_BACKTRACE => $failure.map(OsString::from), ++ GENERAL_BACKTRACE => $general.map(OsString::from), ++ _ => panic!() ++ }), $result); ++ }} ++ } ++ ++ #[test] ++ fn always_enabled_if_failure_is_set_to_yes() { ++ test_enabled!(failure: YEA, general: YEA => true); ++ test_enabled!(failure: YEA, general: NOT_SET => true); ++ test_enabled!(failure: YEA, general: NAY => true); ++ } ++ ++ #[test] ++ fn never_enabled_if_failure_is_set_to_no() { ++ test_enabled!(failure: NAY, general: YEA => false); ++ test_enabled!(failure: NAY, general: NOT_SET => false); ++ test_enabled!(failure: NAY, general: NAY => false); ++ } ++ ++ #[test] ++ fn follows_general_if_failure_is_not_set() { ++ test_enabled!(failure: NOT_SET, general: YEA => true); ++ test_enabled!(failure: NOT_SET, general: NOT_SET => false); ++ test_enabled!(failure: NOT_SET, general: NAY => false); ++ } ++} diff --cc vendor/failure-0.1.2/src/backtrace/mod.rs index 000000000,000000000..58f0477b3 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/backtrace/mod.rs @@@ -1,0 -1,0 +1,144 @@@ ++use core::fmt::{self, Debug, Display}; ++ ++macro_rules! with_backtrace { ($($i:item)*) => ($(#[cfg(all(feature = "backtrace", feature = "std"))]$i)*) } ++macro_rules! without_backtrace { ($($i:item)*) => ($(#[cfg(not(all(feature = "backtrace", feature = "std")))]$i)*) } ++ ++without_backtrace! { ++ /// A `Backtrace`. ++ /// ++ /// This is an opaque wrapper around the backtrace provided by ++ /// libbacktrace. A variety of optimizations have been performed to avoid ++ /// unnecessary or ill-advised work: ++ /// ++ /// - If this crate is compiled in `no_std` compatible mode, `Backtrace` ++ /// is an empty struct, and will be completely compiled away. ++ /// - If this crate is run without the `RUST_BACKTRACE` environmental ++ /// variable enabled, the backtrace will not be generated at runtime. ++ /// - Even if a backtrace is generated, the most expensive part of ++ /// generating a backtrace is symbol resolution. This backtrace does not ++ /// perform symbol resolution until it is actually read (e.g. by ++ /// printing it). If the Backtrace is never used for anything, symbols ++ /// never get resolved. ++ /// ++ /// Even with these optimizations, including a backtrace in your failure ++ /// may not be appropriate to your use case. You are not required to put a ++ /// backtrace in a custom `Fail` type. ++ /// ++ /// > (We have detected that this crate was documented with no_std ++ /// > compatibility turned on. The version of this crate that has been ++ /// > documented here will never generate a backtrace.) ++ pub struct Backtrace { ++ _secret: (), ++ } ++ ++ impl Backtrace { ++ /// Constructs a new backtrace. This will only create a real backtrace ++ /// if the crate is compiled in std mode and the `RUST_BACKTRACE` ++ /// environmental variable is activated. ++ /// ++ /// > (We have detected that this crate was documented with no_std ++ /// > compatibility turned on. The version of this crate that has been ++ /// > documented here will never generate a backtrace.) 
++ pub fn new() -> Backtrace { ++ Backtrace { _secret: () } ++ } ++ ++ #[cfg(feature = "std")] ++ pub(crate) fn none() -> Backtrace { ++ Backtrace { _secret: () } ++ } ++ ++ #[cfg(feature = "std")] ++ pub(crate) fn is_none(&self) -> bool { ++ true ++ } ++ } ++ ++ impl Default for Backtrace { ++ fn default() -> Backtrace { ++ Backtrace::new() ++ } ++ } ++ ++ impl Debug for Backtrace { ++ fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { ++ Ok(()) ++ } ++ } ++ ++ impl Display for Backtrace { ++ fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { ++ Ok(()) ++ } ++ } ++} ++ ++with_backtrace! { ++ extern crate backtrace; ++ ++ mod internal; ++ ++ use self::internal::InternalBacktrace; ++ ++ /// A `Backtrace`. ++ /// ++ /// This is an opaque wrapper around the backtrace provided by ++ /// libbacktrace. A variety of optimizations have been performed to avoid ++ /// unnecessary or ill-advised work: ++ /// ++ /// - If this crate is compiled in `no_std` compatible mode, `Backtrace` ++ /// is an empty struct, and will be completely compiled away. ++ /// - If this crate is run without the `RUST_BACKTRACE` environmental ++ /// variable enabled, the backtrace will not be generated at runtime. ++ /// - Even if a backtrace is generated, the most expensive part of ++ /// generating a backtrace is symbol resolution. This backtrace does not ++ /// perform symbol resolution until it is actually read (e.g. by ++ /// printing it). If the Backtrace is never used for anything, symbols ++ /// never get resolved. ++ /// ++ /// Even with these optimizations, including a backtrace in your failure ++ /// may not be appropriate to your use case. You are not required to put a ++ /// backtrace in a custom `Fail` type. ++ pub struct Backtrace { ++ internal: InternalBacktrace ++ } ++ ++ impl Backtrace { ++ /// Constructs a new backtrace. This will only create a real backtrace ++ /// if the crate is compiled in std mode and the `RUST_BACKTRACE` ++ /// environmental variable is activated. 
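++        ///
++        /// # Example
++        ///
++        /// A brief sketch: capture is cheap, and the backtrace displays as
++        /// empty unless `RUST_BACKTRACE` (or `RUST_FAILURE_BACKTRACE`) is set
++        /// to a non-zero value at runtime.
++        ///
++        /// ```
++        /// use failure::Backtrace;
++        ///
++        /// let bt = Backtrace::new();
++        /// // Symbol resolution is deferred until the backtrace is printed.
++        /// println!("{}", bt);
++        /// ```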
++ pub fn new() -> Backtrace { ++ Backtrace { internal: InternalBacktrace::new() } ++ } ++ ++ pub(crate) fn none() -> Backtrace { ++ Backtrace { internal: InternalBacktrace::none() } ++ } ++ ++ pub(crate) fn is_none(&self) -> bool { ++ self.internal.is_none() ++ } ++ } ++ ++ impl Default for Backtrace { ++ fn default() -> Backtrace { ++ Backtrace::new() ++ } ++ } ++ ++ impl Debug for Backtrace { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ if let Some(bt) = self.internal.as_backtrace() { ++ bt.fmt(f) ++ } else { Ok(()) } ++ } ++ } ++ ++ impl Display for Backtrace { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ if let Some(bt) = self.internal.as_backtrace() { ++ bt.fmt(f) ++ } else { Ok(()) } ++ } ++ } ++} diff --cc vendor/failure-0.1.2/src/box_std.rs index 000000000,000000000..a58ae6666 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/box_std.rs @@@ -1,0 -1,0 +1,19 @@@ ++use std::error::Error; ++use std::fmt; ++use Fail; ++ ++pub struct BoxStd(pub Box); ++ ++impl fmt::Display for BoxStd { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ fmt::Display::fmt(&self.0, f) ++ } ++} ++ ++impl fmt::Debug for BoxStd { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ fmt::Debug::fmt(&self.0, f) ++ } ++} ++ ++impl Fail for BoxStd {} diff --cc vendor/failure-0.1.2/src/compat.rs index 000000000,000000000..9a47bdb24 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/compat.rs @@@ -1,0 -1,0 +1,47 @@@ ++use core::fmt::{self, Display}; ++ ++/// A compatibility wrapper around an error type from this crate. ++/// ++/// `Compat` implements `std::error::Error`, allowing the types from this ++/// crate to be passed to interfaces that expect a type of that trait. ++#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Default)] ++pub struct Compat { ++ pub(crate) error: E, ++} ++ ++impl Display for Compat { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ Display::fmt(&self.error, f) ++ } ++} ++ ++impl Compat { ++ /// Unwraps this into the inner error. ++ pub fn into_inner(self) -> E { ++ self.error ++ } ++ ++ /// Gets a reference to the inner error. ++ pub fn get_ref(&self) -> &E { ++ &self.error ++ } ++} ++ ++with_std! { ++ use std::fmt::Debug; ++ use std::error::Error as StdError; ++ ++ use Error; ++ ++ impl StdError for Compat { ++ fn description(&self) -> &'static str { ++ "An error has occurred." ++ } ++ } ++ ++ impl From for Box { ++ fn from(error: Error) -> Box { ++ Box::new(Compat { error }) ++ } ++ } ++} diff --cc vendor/failure-0.1.2/src/context.rs index 000000000,000000000..6e1fe90f9 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/context.rs @@@ -1,0 -1,0 +1,143 @@@ ++use core::fmt::{self, Debug, Display}; ++ ++use Fail; ++ ++without_std! { ++ /// An error with context around it. ++ /// ++ /// The context is intended to be a human-readable, user-facing explanation for the ++ /// error that has occurred. The underlying error is not assumed to be end-user-relevant ++ /// information. ++ /// ++ /// The `Display` impl for `Context` only prints the human-readable context, while the ++ /// `Debug` impl also prints the underlying error. ++ pub struct Context { ++ context: D, ++ } ++ ++ impl Context { ++ /// Creates a new context without an underlying error message. ++ pub fn new(context: D) -> Context { ++ Context { context } ++ } ++ ++ /// Returns a reference to the context provided with this error. 
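++        ///
++        /// # Example
++        ///
++        /// A small sketch of pairing `new` with `get_context`:
++        ///
++        /// ```
++        /// use failure::Context;
++        ///
++        /// let ctx = Context::new("could not parse config");
++        /// assert_eq!(*ctx.get_context(), "could not parse config");
++        /// ```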
++ pub fn get_context(&self) -> &D { ++ &self.context ++ } ++ ++ pub(crate) fn with_err(context: D, _: E) -> Context { ++ Context { context } ++ } ++ } ++ ++ impl Fail for Context { } ++ ++ impl Debug for Context { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ write!(f, "{}", self.context) ++ } ++ } ++ ++ impl Display for Context { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ write!(f, "{}", self.context) ++ } ++ } ++} ++ ++with_std! { ++ use {Error, Backtrace}; ++ ++ /// An error with context around it. ++ /// ++ /// The context is intended to be a human-readable, user-facing explanation for the ++ /// error that has occurred. The underlying error is not assumed to be end-user-relevant ++ /// information. ++ /// ++ /// The `Display` impl for `Context` only prints the human-readable context, while the ++ /// `Debug` impl also prints the underlying error. ++ pub struct Context { ++ context: D, ++ failure: Either, ++ } ++ ++ impl Context { ++ /// Creates a new context without an underlying error message. ++ pub fn new(context: D) -> Context { ++ let failure = Either::This(Backtrace::new()); ++ Context { context, failure } ++ } ++ ++ /// Returns a reference to the context provided with this error. ++ pub fn get_context(&self) -> &D { ++ &self.context ++ } ++ ++ pub(crate) fn with_err>(context: D, error: E) -> Context { ++ let failure = Either::That(error.into()); ++ Context { context, failure } ++ } ++ } ++ ++ impl Fail for Context { ++ fn cause(&self) -> Option<&Fail> { ++ self.failure.as_cause() ++ } ++ ++ fn backtrace(&self) -> Option<&Backtrace> { ++ Some(self.failure.backtrace()) ++ } ++ } ++ ++ impl Debug for Context { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ write!(f, "{:?}\n\n{}", self.failure, self.context) ++ } ++ } ++ ++ impl Display for Context { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ write!(f, "{}", self.context) ++ } ++ } ++ ++ enum Either { ++ This(A), ++ That(B), ++ } ++ ++ impl Either { ++ fn backtrace(&self) -> &Backtrace { ++ match *self { ++ Either::This(ref backtrace) => backtrace, ++ Either::That(ref error) => error.backtrace(), ++ } ++ } ++ ++ fn as_cause(&self) -> Option<&Fail> { ++ match *self { ++ Either::This(_) => None, ++ Either::That(ref error) => Some(error.as_fail()) ++ } ++ } ++ } ++ ++ impl Debug for Either { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ match *self { ++ Either::This(ref backtrace) => write!(f, "{:?}", backtrace), ++ Either::That(ref error) => write!(f, "{:?}", error), ++ } ++ } ++ } ++} ++ ++impl From for Context ++where ++ D: Display + Send + Sync + 'static, ++{ ++ fn from(display: D) -> Context { ++ Context::new(display) ++ } ++} diff --cc vendor/failure-0.1.2/src/error/error_impl.rs index 000000000,000000000..f033c5629 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/error/error_impl.rs @@@ -1,0 -1,0 +1,59 @@@ ++use core::mem; ++use core::ptr; ++ ++use Fail; ++use backtrace::Backtrace; ++ ++pub(crate) struct ErrorImpl { ++ inner: Box>, ++} ++ ++struct Inner { ++ backtrace: Backtrace, ++ pub(crate) failure: F, ++} ++ ++impl From for ErrorImpl { ++ fn from(failure: F) -> ErrorImpl { ++ let inner: Inner = { ++ let backtrace = if failure.backtrace().is_none() { ++ Backtrace::new() ++ } else { Backtrace::none() }; ++ Inner { failure, backtrace } ++ }; ++ ErrorImpl { inner: Box::new(inner) } ++ } ++} ++ ++impl ErrorImpl { ++ pub(crate) fn failure(&self) -> &Fail { ++ &self.inner.failure ++ } ++ ++ pub(crate) fn failure_mut(&mut self) -> &mut 
Fail {
++        &mut self.inner.failure
++    }
++
++    pub(crate) fn backtrace(&self) -> &Backtrace {
++        &self.inner.backtrace
++    }
++
++    pub(crate) fn downcast<T: Fail>(self) -> Result<T, ErrorImpl> {
++        let ret: Option<T> = self.failure().downcast_ref().map(|fail| {
++            unsafe {
++                // drop the backtrace
++                let _ = ptr::read(&self.inner.backtrace as *const Backtrace);
++                // read out the fail type
++                ptr::read(fail as *const T)
++            }
++        });
++        match ret {
++            Some(ret) => {
++                // forget self (backtrace is dropped, failure is moved)
++                mem::forget(self);
++                Ok(ret)
++            }
++            _ => Err(self)
++        }
++    }
++}
diff --cc vendor/failure-0.1.2/src/error/error_impl_small.rs
index 000000000,000000000..6ff7c78ec
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/src/error/error_impl_small.rs
@@@ -1,0 -1,0 +1,132 @@@
++use std::heap::{Heap, Alloc, Layout};
++
++use core::mem;
++use core::ptr;
++
++use Fail;
++use backtrace::Backtrace;
++
++pub(crate) struct ErrorImpl {
++    inner: &'static mut Inner,
++}
++
++// Dynamically sized inner value
++struct Inner {
++    backtrace: Backtrace,
++    vtable: *const VTable,
++    failure: FailData,
++}
++
++unsafe impl Send for Inner { }
++unsafe impl Sync for Inner { }
++
++extern {
++    type VTable;
++    type FailData;
++}
++
++#[allow(dead_code)]
++struct InnerRaw<F> {
++    header: InnerHeader,
++    failure: F,
++}
++
++#[allow(dead_code)]
++struct InnerHeader {
++    backtrace: Backtrace,
++    vtable: *const VTable,
++}
++
++struct TraitObject {
++    #[allow(dead_code)]
++    data: *const FailData,
++    vtable: *const VTable,
++}
++
++impl<F: Fail> From<F> for ErrorImpl {
++    fn from(failure: F) -> ErrorImpl {
++        let backtrace = if failure.backtrace().is_none() {
++            Backtrace::new()
++        } else {
++            Backtrace::none()
++        };
++
++        unsafe {
++            let vtable = mem::transmute::<_, TraitObject>(&failure as &Fail).vtable;
++
++            let ptr: *mut InnerRaw<F> = match Heap.alloc(Layout::new::<InnerRaw<F>>()) {
++                Ok(p) => p as *mut InnerRaw<F>,
++                Err(e) => Heap.oom(e),
++            };
++
++            // N.B.
must use `ptr::write`, not `=`, to avoid dropping the contents of `*ptr` ++ ptr::write(ptr, InnerRaw { ++ header: InnerHeader { ++ backtrace, ++ vtable, ++ }, ++ failure, ++ }); ++ ++ let inner: &'static mut Inner = mem::transmute(ptr); ++ ++ ErrorImpl { inner } ++ } ++ } ++} ++ ++impl ErrorImpl { ++ pub(crate) fn failure(&self) -> &Fail { ++ unsafe { ++ mem::transmute::(TraitObject { ++ data: &self.inner.failure as *const FailData, ++ vtable: self.inner.vtable, ++ }) ++ } ++ } ++ ++ pub(crate) fn failure_mut(&mut self) -> &mut Fail { ++ unsafe { ++ mem::transmute::(TraitObject { ++ data: &mut self.inner.failure as *const FailData, ++ vtable: self.inner.vtable, ++ }) ++ } ++ } ++ ++ pub(crate) fn backtrace(&self) -> &Backtrace { ++ &self.inner.backtrace ++ } ++ ++ pub(crate) fn downcast(self) -> Result { ++ let ret: Option = self.failure().downcast_ref().map(|fail| { ++ unsafe { ++ // drop the backtrace ++ let _ = ptr::read(&self.inner.backtrace as *const Backtrace); ++ // read out the fail type ++ ptr::read(fail as *const T) ++ } ++ }); ++ match ret { ++ Some(ret) => { ++ // forget self (backtrace is dropped, failure is moved ++ mem::forget(self); ++ Ok(ret) ++ } ++ _ => Err(self) ++ } ++ } ++} ++ ++ ++#[cfg(test)] ++mod test { ++ use std::mem::size_of; ++ ++ use super::ErrorImpl; ++ ++ #[test] ++ fn assert_is_one_word() { ++ assert_eq!(size_of::(), size_of::()); ++ } ++} diff --cc vendor/failure-0.1.2/src/error/mod.rs index 000000000,000000000..04c0303c3 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/error/mod.rs @@@ -1,0 -1,0 +1,228 @@@ ++use core::fmt::{self, Display, Debug}; ++ ++use {Causes, Fail}; ++use backtrace::Backtrace; ++use context::Context; ++use compat::Compat; ++ ++#[cfg(feature = "std")] ++use box_std::BoxStd; ++ ++#[cfg_attr(feature = "small-error", path = "./error_impl_small.rs")] ++mod error_impl; ++use self::error_impl::ErrorImpl; ++ ++#[cfg(feature = "std")] ++use std::error::Error as StdError; ++ ++ ++/// The `Error` type, which can contain any failure. ++/// ++/// Functions which accumulate many kinds of errors should return this type. ++/// All failures can be converted into it, so functions which catch those ++/// errors can be tried with `?` inside of a function that returns this kind ++/// of error. ++/// ++/// In addition to implementing `Debug` and `Display`, this type carries `Backtrace` ++/// information, and can be downcast into the failure that underlies it for ++/// more detailed inspection. ++pub struct Error { ++ imp: ErrorImpl, ++} ++ ++impl From for Error { ++ fn from(failure: F) -> Error { ++ Error { ++ imp: ErrorImpl::from(failure) ++ } ++ } ++} ++ ++impl Error { ++ /// Creates an `Error` from `Box`. ++ /// ++ /// This method is useful for comparability with code, ++ /// which does not use the `Fail` trait. ++ /// ++ /// # Example ++ /// ++ /// ``` ++ /// use std::error::Error as StdError; ++ /// use failure::Error; ++ /// ++ /// fn app_fn() -> Result { ++ /// let x = library_fn().map_err(Error::from_boxed_compat)?; ++ /// Ok(x * 2) ++ /// } ++ /// ++ /// fn library_fn() -> Result> { ++ /// Ok(92) ++ /// } ++ /// ``` ++ #[cfg(feature = "std")] ++ pub fn from_boxed_compat(err: Box) -> Error { ++ Error::from(BoxStd(err)) ++ } ++ ++ /// Return a reference to the underlying failure that this `Error` ++ /// contains. ++ pub fn as_fail(&self) -> &Fail { ++ self.imp.failure() ++ } ++ ++ /// Returns a reference to the underlying cause of this `Error`. Unlike the ++ /// method on `Fail`, this does not return an `Option`. 
The `Error` type ++ /// always has an underlying failure. ++ /// ++ /// This method has been deprecated in favor of the [Error::as_fail] method, ++ /// which does the same thing. ++ #[deprecated(since = "0.1.2", note = "please use 'as_fail()' method instead")] ++ pub fn cause(&self) -> &Fail { ++ self.as_fail() ++ } ++ ++ /// Gets a reference to the `Backtrace` for this `Error`. ++ /// ++ /// If the failure this wrapped carried a backtrace, that backtrace will ++ /// be returned. Otherwise, the backtrace will have been constructed at ++ /// the point that failure was cast into the `Error` type. ++ pub fn backtrace(&self) -> &Backtrace { ++ self.imp.failure().backtrace().unwrap_or(&self.imp.backtrace()) ++ } ++ ++ /// Provides context for this `Error`. ++ /// ++ /// This can provide additional information about this error, appropriate ++ /// to the semantics of the current layer. That is, if you have a ++ /// lower-level error, such as an IO error, you can provide additional context ++ /// about what that error means in the context of your function. This ++ /// gives users of this function more information about what has gone ++ /// wrong. ++ /// ++ /// This takes any type that implements `Display`, as well as ++ /// `Send`/`Sync`/`'static`. In practice, this means it can take a `String` ++ /// or a string literal, or a failure, or some other custom context-carrying ++ /// type. ++ pub fn context(self, context: D) -> Context { ++ Context::with_err(context, self) ++ } ++ ++ /// Wraps `Error` in a compatibility type. ++ /// ++ /// This type implements the `Error` trait from `std::error`. If you need ++ /// to pass failure's `Error` to an interface that takes any `Error`, you ++ /// can use this method to get a compatible type. ++ pub fn compat(self) -> Compat { ++ Compat { error: self } ++ } ++ ++ /// Attempts to downcast this `Error` to a particular `Fail` type. ++ /// ++ /// This downcasts by value, returning an owned `T` if the underlying ++ /// failure is of the type `T`. For this reason it returns a `Result` - in ++ /// the case that the underlying error is of a different type, the ++ /// original `Error` is returned. ++ pub fn downcast(self) -> Result { ++ self.imp.downcast().map_err(|imp| Error { imp }) ++ } ++ ++ /// Returns the "root cause" of this error - the last value in the ++ /// cause chain which does not return an underlying `cause`. ++ pub fn find_root_cause(&self) -> &Fail { ++ self.as_fail().find_root_cause() ++ } ++ ++ /// Returns a iterator over the causes of this error with the cause ++ /// of the fail as the first item and the `root_cause` as the final item. ++ /// ++ /// Use `iter_chain` to also include the fail of this error itself. ++ pub fn iter_causes(&self) -> Causes { ++ self.as_fail().iter_causes() ++ } ++ ++ /// Returns a iterator over all fails up the chain from the current ++ /// as the first item up to the `root_cause` as the final item. ++ /// ++ /// This means that the chain also includes the fail itself which ++ /// means that it does *not* start with `cause`. To skip the outermost ++ /// fail use `iter_causes` instead. ++ pub fn iter_chain(&self) -> Causes { ++ self.as_fail().iter_chain() ++ } ++ ++ /// Attempts to downcast this `Error` to a particular `Fail` type by ++ /// reference. ++ /// ++ /// If the underlying error is not of type `T`, this will return `None`. ++ pub fn downcast_ref(&self) -> Option<&T> { ++ self.imp.failure().downcast_ref() ++ } ++ ++ /// Attempts to downcast this `Error` to a particular `Fail` type by ++ /// mutable reference. 
++ /// ++ /// If the underlying error is not of type `T`, this will return `None`. ++ pub fn downcast_mut(&mut self) -> Option<&mut T> { ++ self.imp.failure_mut().downcast_mut() ++ } ++ ++ /// Deprecated alias to `find_root_cause`. ++ #[deprecated(since = "0.1.2", note = "please use the 'find_root_cause()' method instead")] ++ pub fn root_cause(&self) -> &Fail { ++ ::find_root_cause(self.as_fail()) ++ } ++ ++ /// Deprecated alias to `iter_causes`. ++ #[deprecated(since = "0.1.2", note = "please use the 'iter_chain()' method instead")] ++ pub fn causes(&self) -> Causes { ++ Causes { fail: Some(self.as_fail()) } ++ } ++} ++ ++impl Display for Error { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ Display::fmt(&self.imp.failure(), f) ++ } ++} ++ ++impl Debug for Error { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ let backtrace = self.imp.backtrace(); ++ if backtrace.is_none() { ++ Debug::fmt(&self.imp.failure(), f) ++ } else { ++ write!(f, "{:?}\n\n{:?}", &self.imp.failure(), backtrace) ++ } ++ } ++} ++ ++impl AsRef for Error { ++ fn as_ref(&self) -> &Fail { ++ self.as_fail() ++ } ++} ++ ++#[cfg(test)] ++mod test { ++ use std::io; ++ use super::Error; ++ ++ fn assert_just_data() { } ++ ++ #[test] ++ fn assert_error_is_just_data() { ++ assert_just_data::(); ++ } ++ ++ #[test] ++ fn methods_seem_to_work() { ++ let io_error: io::Error = io::Error::new(io::ErrorKind::NotFound, "test"); ++ let error: Error = io::Error::new(io::ErrorKind::NotFound, "test").into(); ++ assert!(error.downcast_ref::().is_some()); ++ let _: ::Backtrace = *error.backtrace(); ++ assert_eq!(format!("{:?}", io_error), format!("{:?}", error)); ++ assert_eq!(format!("{}", io_error), format!("{}", error)); ++ drop(error); ++ assert!(true); ++ } ++} diff --cc vendor/failure-0.1.2/src/error_message.rs index 000000000,000000000..01ff1ae36 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/error_message.rs @@@ -1,0 -1,0 +1,28 @@@ ++use core::fmt::{self, Display, Debug}; ++ ++use Fail; ++use Error; ++ ++/// Constructs a `Fail` type from a string. ++/// ++/// This is a convenient way to turn a string into an error value that ++/// can be passed around, if you do not want to create a new `Fail` type for ++/// this use case. ++pub fn err_msg(msg: D) -> Error { ++ Error::from(ErrorMessage { msg }) ++} ++ ++/// A `Fail` type that just contains an error message. You can construct ++/// this from the `err_msg` function. ++#[derive(Debug)] ++struct ErrorMessage { ++ msg: D, ++} ++ ++impl Fail for ErrorMessage { } ++ ++impl Display for ErrorMessage { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ Display::fmt(&self.msg, f) ++ } ++} diff --cc vendor/failure-0.1.2/src/lib.rs index 000000000,000000000..1002e7608 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/lib.rs @@@ -1,0 -1,0 +1,289 @@@ ++//! An experimental new error-handling library. Guide-style introduction ++//! is available [here](https://boats.gitlab.io/failure/). ++//! ++//! The primary items exported by this library are: ++//! ++//! - `Fail`: a new trait for custom error types in Rust. ++//! - `Error`: a wrapper around `Fail` types to make it easy to coalesce them ++//! at higher levels. ++//! ++//! As a general rule, library authors should create their own error types and ++//! implement `Fail` for them, whereas application authors should primarily ++//! deal with the `Error` type. There are exceptions to this rule, though, in ++//! 
both directions, and users should do whatever seems most appropriate to
++//! their situation.
++//!
++//! ## Backtraces
++//!
++//! Backtraces are disabled by default. To turn backtraces on, enable
++//! the `backtrace` Cargo feature and set the `RUST_BACKTRACE` environment
++//! variable to a non-zero value (this also enables backtraces for panics).
++//! Use the `RUST_FAILURE_BACKTRACE` variable to enable or disable backtraces
++//! for `failure` specifically.
++#![cfg_attr(not(feature = "std"), no_std)]
++#![deny(missing_docs)]
++#![deny(warnings)]
++#![cfg_attr(feature = "small-error", feature(extern_types, allocator_api))]
++
++macro_rules! with_std { ($($i:item)*) => ($(#[cfg(feature = "std")]$i)*) }
++macro_rules! without_std { ($($i:item)*) => ($(#[cfg(not(feature = "std"))]$i)*) }
++
++// Re-export libcore using an alias so that the macros can work without
++// requiring `extern crate core` downstream.
++#[doc(hidden)]
++pub extern crate core as _core;
++
++mod backtrace;
++#[cfg(feature = "std")]
++mod box_std;
++mod compat;
++mod context;
++mod result_ext;
++
++use core::any::TypeId;
++use core::fmt::{Debug, Display};
++
++pub use backtrace::Backtrace;
++pub use compat::Compat;
++pub use context::Context;
++pub use result_ext::ResultExt;
++
++#[cfg(feature = "failure_derive")]
++#[allow(unused_imports)]
++#[macro_use]
++extern crate failure_derive;
++
++#[cfg(feature = "failure_derive")]
++#[doc(hidden)]
++pub use failure_derive::*;
++
++with_std! {
++    extern crate core;
++
++    mod sync_failure;
++    pub use sync_failure::SyncFailure;
++
++    mod error;
++
++    use std::error::Error as StdError;
++
++    pub use error::Error;
++
++    /// A common result with an `Error`.
++    pub type Fallible<T> = Result<T, Error>;
++
++    mod macros;
++    mod error_message;
++    pub use error_message::err_msg;
++}
++
++/// The `Fail` trait.
++///
++/// Implementors of this trait are called 'failures'.
++///
++/// All error types should implement `Fail`, which provides a baseline of
++/// functionality that they all share.
++///
++/// `Fail` has no required methods, but it does require that your type
++/// implement several other traits:
++///
++/// - `Display`: to print a user-friendly representation of the error.
++/// - `Debug`: to print a verbose, developer-focused representation of the
++///   error.
++/// - `Send + Sync`: your error type is required to be safe to transfer to
++///   and reference from another thread.
++///
++/// Additionally, all failures must be `'static`. This enables downcasting.
++///
++/// `Fail` provides several methods with default implementations. Two of these
++/// may be appropriate to override depending on the definition of your
++/// particular failure: the `cause` and `backtrace` methods.
++///
++/// The `failure_derive` crate provides a way to derive the `Fail` trait for
++/// your type. Additionally, all types that already implement
++/// `std::error::Error`, and are also `Send`, `Sync`, and `'static`, implement
++/// `Fail` by a blanket impl.
++pub trait Fail: Display + Debug + Send + Sync + 'static {
++    /// Returns a reference to the underlying cause of this failure, if it
++    /// is an error that wraps other errors.
++    ///
++    /// Returns `None` if this failure does not have another error as its
++    /// underlying cause. By default, this returns `None`.
++    ///
++    /// This should **never** return a reference to `self`, but only return
++    /// `Some` when it can return a **different** failure. Users may loop
++    /// over the cause chain, and returning `self` would result in an infinite
++    /// loop.
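++    ///
++    /// # Example
++    ///
++    /// A sketch of a wrapper failure that reports its inner error as its
++    /// cause (assumes `use std::io;` and the `failure_derive` custom derive
++    /// are in scope; the type is hypothetical):
++    ///
++    /// ```rust,ignore
++    /// #[derive(Debug, Fail)]
++    /// #[fail(display = "could not load configuration")]
++    /// struct ConfigError(#[fail(cause)] io::Error);
++    /// ```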
++ fn cause(&self) -> Option<&Fail> { ++ None ++ } ++ ++ /// Returns a reference to the `Backtrace` carried by this failure, if it ++ /// carries one. ++ /// ++ /// Returns `None` if this failure does not carry a backtrace. By ++ /// default, this returns `None`. ++ fn backtrace(&self) -> Option<&Backtrace> { ++ None ++ } ++ ++ /// Provides context for this failure. ++ /// ++ /// This can provide additional information about this error, appropriate ++ /// to the semantics of the current layer. That is, if you have a ++ /// lower-level error, such as an IO error, you can provide additional context ++ /// about what that error means in the context of your function. This ++ /// gives users of this function more information about what has gone ++ /// wrong. ++ /// ++ /// This takes any type that implements `Display`, as well as ++ /// `Send`/`Sync`/`'static`. In practice, this means it can take a `String` ++ /// or a string literal, or another failure, or some other custom context-carrying ++ /// type. ++ fn context(self, context: D) -> Context ++ where ++ D: Display + Send + Sync + 'static, ++ Self: Sized, ++ { ++ Context::with_err(context, self) ++ } ++ ++ /// Wraps this failure in a compatibility wrapper that implements ++ /// `std::error::Error`. ++ /// ++ /// This allows failures to be compatible with older crates that ++ /// expect types that implement the `Error` trait from `std::error`. ++ fn compat(self) -> Compat ++ where ++ Self: Sized, ++ { ++ Compat { error: self } ++ } ++ ++ #[doc(hidden)] ++ #[deprecated(since = "0.1.2", note = "please use the 'iter_causes()' method instead")] ++ fn causes(&self) -> Causes ++ where ++ Self: Sized, ++ { ++ Causes { fail: Some(self) } ++ } ++ ++ #[doc(hidden)] ++ #[deprecated(since = "0.1.2", note = "please use the 'find_root_cause()' method instead")] ++ fn root_cause(&self) -> &Fail ++ where ++ Self: Sized, ++ { ++ find_root_cause(self) ++ } ++ ++ #[doc(hidden)] ++ fn __private_get_type_id__(&self) -> TypeId { ++ TypeId::of::() ++ } ++} ++ ++impl Fail { ++ /// Attempts to downcast this failure to a concrete type by reference. ++ /// ++ /// If the underlying error is not of type `T`, this will return `None`. ++ pub fn downcast_ref(&self) -> Option<&T> { ++ if self.__private_get_type_id__() == TypeId::of::() { ++ unsafe { Some(&*(self as *const Fail as *const T)) } ++ } else { ++ None ++ } ++ } ++ ++ /// Attempts to downcast this failure to a concrete type by mutable ++ /// reference. ++ /// ++ /// If the underlying error is not of type `T`, this will return `None`. ++ pub fn downcast_mut(&mut self) -> Option<&mut T> { ++ if self.__private_get_type_id__() == TypeId::of::() { ++ unsafe { Some(&mut *(self as *mut Fail as *mut T)) } ++ } else { ++ None ++ } ++ } ++ ++ /// Returns the "root cause" of this `Fail` - the last value in the ++ /// cause chain which does not return an underlying `cause`. ++ /// ++ /// If this type does not have a cause, `self` is returned, because ++ /// it is its own root cause. ++ /// ++ /// This is equivalent to iterating over `iter_causes()` and taking ++ /// the last item. ++ pub fn find_root_cause(&self) -> &Fail { ++ find_root_cause(self) ++ } ++ ++ /// Returns a iterator over the causes of this `Fail` with the cause ++ /// of this fail as the first item and the `root_cause` as the final item. ++ /// ++ /// Use `iter_chain` to also include the fail itself. 
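++    ///
++    /// # Example
++    ///
++    /// A short sketch, assuming some `err: &Fail`:
++    ///
++    /// ```rust,ignore
++    /// for cause in err.iter_causes() {
++    ///     println!("caused by: {}", cause);
++    /// }
++    /// ```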
++ pub fn iter_causes(&self) -> Causes { ++ Causes { fail: self.cause() } ++ } ++ ++ /// Returns a iterator over all fails up the chain from the current ++ /// as the first item up to the `root_cause` as the final item. ++ /// ++ /// This means that the chain also includes the fail itself which ++ /// means that it does *not* start with `cause`. To skip the outermost ++ /// fail use `iter_causes` instead. ++ pub fn iter_chain(&self) -> Causes { ++ Causes { fail: Some(self) } ++ } ++ ++ /// Deprecated alias to `find_root_cause`. ++ #[deprecated(since = "0.1.2", note = "please use the 'find_root_cause()' method instead")] ++ pub fn root_cause(&self) -> &Fail { ++ find_root_cause(self) ++ } ++ ++ /// Deprecated alias to `iter_causes`. ++ #[deprecated(since = "0.1.2", note = "please use the 'iter_chain()' method instead")] ++ pub fn causes(&self) -> Causes { ++ Causes { fail: Some(self) } ++ } ++} ++ ++#[cfg(feature = "std")] ++impl Fail for E {} ++ ++#[cfg(feature = "std")] ++impl Fail for Box { ++ fn cause(&self) -> Option<&Fail> { ++ (**self).cause() ++ } ++ ++ fn backtrace(&self) -> Option<&Backtrace> { ++ (**self).backtrace() ++ } ++} ++ ++/// A iterator over the causes of a `Fail` ++pub struct Causes<'f> { ++ fail: Option<&'f Fail>, ++} ++ ++impl<'f> Iterator for Causes<'f> { ++ type Item = &'f Fail; ++ fn next(&mut self) -> Option<&'f Fail> { ++ self.fail.map(|fail| { ++ self.fail = fail.cause(); ++ fail ++ }) ++ } ++} ++ ++fn find_root_cause(mut fail: &Fail) -> &Fail { ++ while let Some(cause) = fail.cause() { ++ fail = cause; ++ } ++ ++ fail ++} diff --cc vendor/failure-0.1.2/src/macros.rs index 000000000,000000000..b9e363a7d new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/macros.rs @@@ -1,0 -1,0 +1,51 @@@ ++/// Exits a function early with an `Error`. ++/// ++/// The `bail!` macro provides an easy way to exit a function. `bail!(X)` is ++/// equivalent to writing: ++/// ++/// ```rust,ignore ++/// return Err(format_err!(X)) ++/// ``` ++#[macro_export] ++macro_rules! bail { ++ ($e:expr) => { ++ return Err($crate::err_msg($e)); ++ }; ++ ($fmt:expr, $($arg:tt)+) => { ++ return Err($crate::err_msg(format!($fmt, $($arg)+))); ++ }; ++} ++ ++/// Exits a function early with an `Error` if the condition is not satisfied. ++/// ++/// Similar to `assert!`, `ensure!` takes a condition and exits the function ++/// if the condition fails. Unlike `assert!`, `ensure!` returns an `Error`, ++/// it does not panic. ++#[macro_export] ++macro_rules! ensure { ++ ($cond:expr, $e:expr) => { ++ if !($cond) { ++ bail!($e); ++ } ++ }; ++ ($cond:expr, $fmt:expr, $($arg:tt)+) => { ++ if !($cond) { ++ bail!($fmt, $($arg)+); ++ } ++ }; ++} ++ ++/// Constructs an `Error` using the standard string interpolation syntax. ++/// ++/// ```rust ++/// #[macro_use] extern crate failure; ++/// ++/// fn main() { ++/// let code = 101; ++/// let err = format_err!("Error code: {}", code); ++/// } ++/// ``` ++#[macro_export] ++macro_rules! format_err { ++ ($($arg:tt)*) => { $crate::err_msg(format!($($arg)*)) } ++} diff --cc vendor/failure-0.1.2/src/result_ext.rs index 000000000,000000000..f4125cdd6 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/result_ext.rs @@@ -1,0 -1,0 +1,203 @@@ ++use core::fmt::Display; ++ ++use {Compat, Context, Fail}; ++ ++/// Extension methods for `Result`. ++pub trait ResultExt { ++ /// Wraps the error in `Compat` to make it compatible with older error ++ /// handling APIs that expect `std::error::Error`. 
++ /// ++ /// # Examples ++ /// ++ /// ``` ++ /// # fn main() { ++ /// # tests::run_test(); ++ /// # } ++ /// # ++ /// # #[cfg(not(all(feature = "std", feature = "derive")))] mod tests { pub fn run_test() { } } ++ /// # ++ /// # #[cfg(all(feature = "std", feature = "derive"))] mod tests { ++ /// use std::error::Error; ++ /// # use std::fmt; ++ /// # ++ /// # extern crate failure; ++ /// # ++ /// # use tests::failure::ResultExt; ++ /// # ++ /// # #[derive(Debug)] ++ /// struct CustomError; ++ /// ++ /// impl Error for CustomError { ++ /// fn description(&self) -> &str { ++ /// "My custom error message" ++ /// } ++ /// ++ /// fn cause(&self) -> Option<&Error> { ++ /// None ++ /// } ++ /// } ++ /// # ++ /// # impl fmt::Display for CustomError { ++ /// # fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ /// # write!(f, "{}", self.description()) ++ /// # } ++ /// # } ++ /// # ++ /// # pub fn run_test() { ++ /// ++ /// let x = (|| -> Result<(), failure::Error> { ++ /// Err(CustomError).compat()? ++ /// })().with_context(|e| { ++ /// format!("An error occured: {}", e) ++ /// }).unwrap_err(); ++ /// ++ /// let x = format!("{}", x); ++ /// ++ /// assert_eq!(x, "An error occured: My custom error message"); ++ /// # } ++ /// ++ /// # } ++ /// ``` ++ fn compat(self) -> Result>; ++ ++ /// Wraps the error type in a context type. ++ /// ++ /// # Examples ++ /// ++ /// ``` ++ /// # #[cfg(all(feature = "std", feature = "derive"))] ++ /// # #[macro_use] extern crate failure; ++ /// # ++ /// # #[cfg(all(feature = "std", feature = "derive"))] ++ /// # #[macro_use] extern crate failure_derive; ++ /// # ++ /// # fn main() { ++ /// # tests::run_test(); ++ /// # } ++ /// # ++ /// # #[cfg(not(all(feature = "std", feature = "derive")))] mod tests { pub fn run_test() { } } ++ /// # ++ /// # #[cfg(all(feature = "std", feature = "derive"))] mod tests { ++ /// # ++ /// # use failure::{self, ResultExt}; ++ /// # ++ /// #[derive(Fail, Debug)] ++ /// #[fail(display = "")] ++ /// struct CustomError; ++ /// # ++ /// # pub fn run_test() { ++ /// ++ /// let x = (|| -> Result<(), failure::Error> { ++ /// Err(CustomError)? ++ /// })().context(format!("An error occured")).unwrap_err(); ++ /// ++ /// let x = format!("{}", x); ++ /// ++ /// assert_eq!(x, "An error occured"); ++ /// # } ++ /// ++ /// # } ++ /// ``` ++ fn context(self, context: D) -> Result> ++ where ++ D: Display + Send + Sync + 'static; ++ ++ /// Wraps the error type in a context type generated by looking at the ++ /// error value. ++ /// ++ /// # Examples ++ /// ++ /// ``` ++ /// # #[cfg(all(feature = "std", feature = "derive"))] ++ /// # #[macro_use] extern crate failure; ++ /// # ++ /// # #[cfg(all(feature = "std", feature = "derive"))] ++ /// # #[macro_use] extern crate failure_derive; ++ /// # ++ /// # fn main() { ++ /// # tests::run_test(); ++ /// # } ++ /// # ++ /// # #[cfg(not(all(feature = "std", feature = "derive")))] mod tests { pub fn run_test() { } } ++ /// # ++ /// # #[cfg(all(feature = "std", feature = "derive"))] mod tests { ++ /// # ++ /// # use failure::{self, ResultExt}; ++ /// # ++ /// #[derive(Fail, Debug)] ++ /// #[fail(display = "My custom error message")] ++ /// struct CustomError; ++ /// # ++ /// # pub fn run_test() { ++ /// ++ /// let x = (|| -> Result<(), failure::Error> { ++ /// Err(CustomError)? 
++ /// })().with_context(|e| { ++ /// format!("An error occured: {}", e) ++ /// }).unwrap_err(); ++ /// ++ /// let x = format!("{}", x); ++ /// ++ /// assert_eq!(x, "An error occured: My custom error message"); ++ /// # } ++ /// ++ /// # } ++ /// ``` ++ fn with_context(self, f: F) -> Result> ++ where ++ F: FnOnce(&E) -> D, ++ D: Display + Send + Sync + 'static; ++} ++ ++impl ResultExt for Result ++where ++ E: Fail, ++{ ++ fn compat(self) -> Result> { ++ self.map_err(|err| err.compat()) ++ } ++ ++ fn context(self, context: D) -> Result> ++ where ++ D: Display + Send + Sync + 'static, ++ { ++ self.map_err(|failure| failure.context(context)) ++ } ++ ++ fn with_context(self, f: F) -> Result> ++ where ++ F: FnOnce(&E) -> D, ++ D: Display + Send + Sync + 'static, ++ { ++ self.map_err(|failure| { ++ let context = f(&failure); ++ failure.context(context) ++ }) ++ } ++} ++ ++with_std! { ++ use Error; ++ ++ impl ResultExt for Result { ++ fn compat(self) -> Result> { ++ self.map_err(|err| err.compat()) ++ } ++ ++ fn context(self, context: D) -> Result> where ++ D: Display + Send + Sync + 'static ++ { ++ self.map_err(|failure| failure.context(context)) ++ } ++ ++ fn with_context(self, f: F) -> Result> where ++ F: FnOnce(&Error) -> D, ++ D: Display + Send + Sync + 'static ++ { ++ self.map_err(|failure| { ++ let context = f(&failure); ++ failure.context(context) ++ }) ++ } ++ } ++} diff --cc vendor/failure-0.1.2/src/small_error.rs index 000000000,000000000..09646e391 new file mode 100644 --- /dev/null +++ b/vendor/failure-0.1.2/src/small_error.rs @@@ -1,0 -1,0 +1,264 @@@ ++use core::fmt::{self, Display, Debug}; ++use std::heap::{Heap, Alloc, Layout}; ++ ++use core::mem; ++use core::ptr; ++ ++use {Causes, Fail}; ++use backtrace::Backtrace; ++use context::Context; ++use compat::Compat; ++ ++/// The `Error` type, which can contain any failure. ++/// ++/// Functions which accumulate many kinds of errors should return this type. ++/// All failures can be converted into it, so functions which catch those ++/// errors can be tried with `?` inside of a function that returns this kind ++/// of error. ++/// ++/// In addition to implementing `Debug` and `Display`, this type carries `Backtrace` ++/// information, and can be downcast into the failure that underlies it for ++/// more detailed inspection. ++pub struct Error { ++ inner: &'static mut Inner, ++} ++ ++// Dynamically sized inner value ++struct Inner { ++ backtrace: Backtrace, ++ vtable: *const VTable, ++ failure: FailData, ++} ++ ++unsafe impl Send for Inner { } ++unsafe impl Sync for Inner { } ++ ++extern { ++ type VTable; ++ type FailData; ++} ++ ++struct InnerRaw { ++ header: InnerHeader, ++ failure: F, ++} ++ ++struct InnerHeader { ++ backtrace: Backtrace, ++ vtable: *const VTable, ++} ++ ++struct TraitObject { ++ #[allow(dead_code)] ++ data: *const FailData, ++ vtable: *const VTable, ++} ++ ++impl From for Error { ++ fn from(failure: F) -> Error { ++ let backtrace = if failure.backtrace().is_none() { ++ Backtrace::new() ++ } else { ++ Backtrace::none() ++ }; ++ ++ unsafe { ++ let vtable = mem::transmute::<_, TraitObject>(&failure as &Fail).vtable; ++ ++ let ptr: *mut InnerRaw = match Heap.alloc(Layout::new::>()) { ++ Ok(p) => p as *mut InnerRaw, ++ Err(e) => Heap.oom(e), ++ }; ++ ++ // N.B. 
must use `ptr::write`, not `=`, to avoid dropping the contents of `*ptr`
++            ptr::write(ptr, InnerRaw {
++                header: InnerHeader {
++                    backtrace,
++                    vtable,
++                },
++                failure,
++            });
++
++            let inner: &'static mut Inner = mem::transmute(ptr);
++
++            Error { inner }
++        }
++    }
++}
++
++impl Inner {
++    fn failure(&self) -> &Fail {
++        unsafe {
++            mem::transmute::<TraitObject, &Fail>(TraitObject {
++                data: &self.failure as *const FailData as *mut (),
++                vtable: self.vtable,
++            })
++        }
++    }
++
++    fn failure_mut(&mut self) -> &mut Fail {
++        unsafe {
++            mem::transmute::<TraitObject, &mut Fail>(TraitObject {
++                data: &mut self.failure as *mut FailData as *mut (),
++                vtable: self.vtable,
++            })
++        }
++    }
++}
++
++impl Error {
++    /// Returns a reference to the underlying cause of this `Error`. Unlike the
++    /// method on `Fail`, this does not return an `Option`. The `Error` type
++    /// always has an underlying failure.
++    pub fn cause(&self) -> &Fail {
++        self.inner.failure()
++    }
++
++    /// Gets a reference to the `Backtrace` for this `Error`.
++    ///
++    /// If the failure this `Error` wraps carried a backtrace, that backtrace will
++    /// be returned. Otherwise, the backtrace will have been constructed at
++    /// the point that failure was cast into the `Error` type.
++    pub fn backtrace(&self) -> &Backtrace {
++        self.inner.failure().backtrace().unwrap_or(&self.inner.backtrace)
++    }
++
++    /// Provides context for this `Error`.
++    ///
++    /// This can provide additional information about this error, appropriate
++    /// to the semantics of the current layer. That is, if you have a
++    /// lower-level error, such as an IO error, you can provide additional context
++    /// about what that error means in the context of your function. This
++    /// gives users of this function more information about what has gone
++    /// wrong.
++    ///
++    /// This takes any type that implements `Display`, as well as
++    /// `Send`/`Sync`/`'static`. In practice, this means it can take a `String`
++    /// or a string literal, or a failure, or some other custom context-carrying
++    /// type.
++    pub fn context<D: Display + Send + Sync + 'static>(self, context: D) -> Context<D> {
++        Context::with_err(context, self)
++    }
++
++    /// Wraps `Error` in a compatibility type.
++    ///
++    /// This type implements the `Error` trait from `std::error`. If you need
++    /// to pass failure's `Error` to an interface that takes any `Error`, you
++    /// can use this method to get a compatible type.
++    pub fn compat(self) -> Compat<Error> {
++        Compat { error: self }
++    }
++
++    /// Attempts to downcast this `Error` to a particular `Fail` type.
++    ///
++    /// This downcasts by value, returning an owned `T` if the underlying
++    /// failure is of the type `T`. For this reason it returns a `Result` - in
++    /// the case that the underlying error is of a different type, the
++    /// original `Error` is returned.
++    pub fn downcast<T: Fail>(self) -> Result<T, Error> {
++        let ret: Option<T> = self.downcast_ref().map(|fail| {
++            unsafe {
++                // drop the backtrace
++                let _ = ptr::read(&self.inner.backtrace as *const Backtrace);
++                // read out the fail type
++                ptr::read(fail as *const T)
++            }
++        });
++        match ret {
++            Some(ret) => {
++                // forget self (backtrace is dropped, failure is moved)
++                mem::forget(self);
++                Ok(ret)
++            }
++            _ => Err(self)
++        }
++    }
++
++    /// Returns the "root cause" of this error - the last value in the
++    /// cause chain which does not return an underlying `cause`.
++    pub fn root_cause(&self) -> &Fail {
++        ::find_root_cause(self.cause())
++    }
++
++    /// Attempts to downcast this `Error` to a particular `Fail` type by
++    /// reference.
++    ///
++    /// If the underlying error is not of type `T`, this will return `None`.
++    pub fn downcast_ref<T: Fail>(&self) -> Option<&T> {
++        self.inner.failure().downcast_ref()
++    }
++
++    /// Attempts to downcast this `Error` to a particular `Fail` type by
++    /// mutable reference.
++    ///
++    /// If the underlying error is not of type `T`, this will return `None`.
++    pub fn downcast_mut<T: Fail>(&mut self) -> Option<&mut T> {
++        self.inner.failure_mut().downcast_mut()
++    }
++
++    /// Returns an iterator over the causes of the `Error`, beginning with
++    /// the failure returned by the `cause` method and ending with the failure
++    /// returned by `root_cause`.
++    pub fn causes(&self) -> Causes {
++        Causes { fail: Some(self.cause()) }
++    }
++}
++
++impl Display for Error {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        Display::fmt(self.inner.failure(), f)
++    }
++}
++
++impl Debug for Error {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        if self.inner.backtrace.is_none() {
++            Debug::fmt(self.inner.failure(), f)
++        } else {
++            write!(f, "{:?}\n\n{:?}", self.inner.failure(), self.inner.backtrace)
++        }
++    }
++}
++
++impl Drop for Error {
++    fn drop(&mut self) {
++        unsafe {
++            let layout = {
++                let header = Layout::new::<InnerHeader>();
++                header.extend(Layout::for_value(self.inner.failure())).unwrap().0
++            };
++            Heap.dealloc(self.inner as *const _ as *const u8 as *mut u8, layout);
++        }
++    }
++}
++
++#[cfg(test)]
++mod test {
++    use std::mem::size_of;
++    use std::io;
++
++    use super::Error;
++
++    #[test]
++    fn assert_error_is_just_data() {
++        fn assert_just_data<T: Send + Sync + 'static>() { }
++        assert_just_data::<Error>();
++    }
++
++    #[test]
++    fn assert_is_one_word() {
++        assert_eq!(size_of::<Error>(), size_of::<usize>());
++    }
++
++    #[test]
++    fn methods_seem_to_work() {
++        let io_error: io::Error = io::Error::new(io::ErrorKind::NotFound, "test");
++        let error: Error = io::Error::new(io::ErrorKind::NotFound, "test").into();
++        assert!(error.downcast_ref::<io::Error>().is_some());
++        let _: ::Backtrace = *error.backtrace();
++        assert_eq!(format!("{:?}", io_error), format!("{:?}", error));
++        assert_eq!(format!("{}", io_error), format!("{}", error));
++        drop(error);
++        assert!(true);
++    }
++}
diff --cc vendor/failure-0.1.2/src/sync_failure.rs
index 000000000,000000000..63e966cdd
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/src/sync_failure.rs
@@@ -1,0 -1,0 +1,97 @@@
++use Fail;
++use std::error::Error;
++use std::fmt::{self, Debug, Display};
++use std::sync::Mutex;
++
++/// Wrapper for `std` errors to make them `Sync`.
++///
++/// This exists to coerce existing types that are only `Error + Send +
++/// 'static` into a `Fail`-compatible representation, most notably for
++/// types generated by `error-chain`.
++///
++/// Unfortunately, this requires wrapping the error in a `Mutex`, which must
++/// be locked for every `Debug`/`Display`. Therefore, this should be
++/// something of a last resort in making the error work with `failure`.
++///
++pub struct SyncFailure<T> {
++    inner: Mutex<T>,
++}
++
++impl<E: Error + Send + 'static> SyncFailure<E> {
++    /// Wraps a non-`Sync` `Error` in order to make it implement `Fail`.
++    ///
++    /// # Example
++    ///
++    /// ```rust
++    /// extern crate failure;
++    ///
++    /// # use std::error::Error as StdError;
++    /// # use std::fmt::{self, Display};
++    /// use failure::{Error, SyncFailure};
++    /// use std::cell::RefCell;
++    ///
++    /// #[derive(Debug)]
++    /// struct NonSyncError {
++    ///     // RefCells are non-Sync, so structs containing them will be
++    ///     // non-Sync as well.
++    ///     count: RefCell<u32>,
++    /// }
++    ///
++    /// // implement Display/Error for NonSyncError...
++    /// #
++    /// # impl StdError for NonSyncError {
++    /// #     fn description(&self) -> &str {
++    /// #         "oops!"
++    /// #     }
++    /// # }
++    /// #
++    /// # impl Display for NonSyncError {
++    /// #     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++    /// #         write!(f, "oops!")
++    /// #     }
++    /// # }
++    ///
++    /// fn returns_error() -> Result<(), NonSyncError> {
++    ///     // Do stuff
++    ///     # Ok(())
++    /// }
++    ///
++    /// fn my_function() -> Result<(), Error> {
++    ///     // without the map_err here, we end up with a compile error
++    ///     // complaining that NonSyncError doesn't implement Sync.
++    ///     returns_error().map_err(SyncFailure::new)?;
++    ///     // Do more stuff
++    ///     # Ok(())
++    /// }
++    /// #
++    /// # fn main() {
++    /// #     my_function().unwrap();
++    /// # }
++    /// ```
++    ///
++    pub fn new(err: E) -> Self {
++        SyncFailure {
++            inner: Mutex::new(err),
++        }
++    }
++}
++
++impl<T> Display for SyncFailure<T>
++where
++    T: Display,
++{
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        self.inner.lock().unwrap().fmt(f)
++    }
++}
++
++impl<T> Debug for SyncFailure<T>
++where
++    T: Debug,
++{
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        (*self.inner.lock().unwrap()).fmt(f)
++    }
++}
++
++impl<E: Error + Send + 'static> Fail for SyncFailure<E> {}
diff --cc vendor/failure-0.1.2/travis.sh
index 000000000,000000000..6c621ca5d
new file mode 100644
--- /dev/null
+++ b/vendor/failure-0.1.2/travis.sh
@@@ -1,0 -1,0 +1,39 @@@
++#!/bin/bash
++
++DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
++
++cargo_test() {
++    cargo test "$@" || { exit 101; }
++}
++
++test_failure_in() {
++    cd $1
++    cargo_test
++    cargo_test --no-default-features
++    cargo_test --features backtrace
++    test_derive_in "$1/failure_derive"
++    cd $DIR
++}
++
++test_derive_in() {
++    cd $1
++    cargo_test
++    cd $DIR
++}
++
++test_nightly_features_in() {
++    cd $1
++    #cargo_test --features small-error
++    cargo_test --all-features
++    cd $DIR
++}
++
++main() {
++    test_failure_in "$DIR/failure-1.X"
++    test_failure_in "$DIR/failure-0.1.X"
++    if [ "${TRAVIS_RUST_VERSION}" = "nightly" ]; then
++        test_nightly_features_in "$DIR/failure-1.X"
++    fi
++}
++
++main
diff --cc vendor/failure_derive-0.1.2/.cargo-checksum.json
index 000000000,000000000..c82357864
new file mode 100644
--- /dev/null
+++ b/vendor/failure_derive-0.1.2/.cargo-checksum.json
@@@ -1,0 -1,0 +1,1 @@@
++{"files":{},"package":"946d0e98a50d9831f5d589038d2ca7f8f455b1c21028c0db0e84116a12696426"}
diff --cc vendor/failure_derive-0.1.2/Cargo.toml
index 000000000,000000000..367225240
new file mode 100644
--- /dev/null
+++ b/vendor/failure_derive-0.1.2/Cargo.toml
@@@ -1,0 -1,0 +1,41 @@@
++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
++#
++# When uploading crates to the registry Cargo will automatically
++# "normalize" Cargo.toml files for maximal compatibility
++# with all versions of Cargo and also rewrite `path` dependencies
++# to registry (e.g. crates.io) dependencies
++#
++# If you believe there's an error in this file please file an
++# issue against the rust-lang/cargo repository.
If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "failure_derive" ++version = "0.1.2" ++authors = ["Without Boats "] ++build = "build.rs" ++description = "derives for the failure crate" ++homepage = "https://boats.gitlab.io/failure" ++documentation = "https://boats.gitlab.io/failure" ++license = "MIT OR Apache-2.0" ++repository = "https://github.com/withoutboats/failure_derive" ++ ++[lib] ++proc-macro = true ++[dependencies.proc-macro2] ++version = "0.4.8" ++ ++[dependencies.quote] ++version = "0.6.3" ++ ++[dependencies.syn] ++version = "0.14.4" ++ ++[dependencies.synstructure] ++version = "0.9.0" ++[dev-dependencies.failure] ++version = "0.1.0" ++ ++[features] ++std = [] diff --cc vendor/failure_derive-0.1.2/build.rs index 000000000,000000000..8f220458d new file mode 100644 --- /dev/null +++ b/vendor/failure_derive-0.1.2/build.rs @@@ -1,0 -1,0 +1,39 @@@ ++use std::env; ++use std::process::Command; ++use std::str; ++use std::str::FromStr; ++ ++fn main() { ++ if rustc_has_dyn_trait() { ++ println!("cargo:rustc-cfg=has_dyn_trait"); ++ } ++} ++ ++fn rustc_has_dyn_trait() -> bool { ++ let rustc = match env::var_os("RUSTC") { ++ Some(rustc) => rustc, ++ None => return false, ++ }; ++ ++ let output = match Command::new(rustc).arg("--version").output() { ++ Ok(output) => output, ++ Err(_) => return false, ++ }; ++ ++ let version = match str::from_utf8(&output.stdout) { ++ Ok(version) => version, ++ Err(_) => return false, ++ }; ++ ++ let mut pieces = version.split('.'); ++ if pieces.next() != Some("rustc 1") { ++ return true; ++ } ++ ++ let next = match pieces.next() { ++ Some(next) => next, ++ None => return false, ++ }; ++ ++ u32::from_str(next).unwrap_or(0) >= 27 ++} diff --cc vendor/failure_derive-0.1.2/src/lib.rs index 000000000,000000000..230d2eea1 new file mode 100644 --- /dev/null +++ b/vendor/failure_derive-0.1.2/src/lib.rs @@@ -1,0 -1,0 +1,200 @@@ ++extern crate proc_macro2; ++extern crate syn; ++ ++#[macro_use] ++extern crate synstructure; ++#[macro_use] ++extern crate quote; ++ ++use proc_macro2::TokenStream; ++ ++decl_derive!([Fail, attributes(fail, cause)] => fail_derive); ++ ++fn fail_derive(s: synstructure::Structure) -> TokenStream { ++ let make_dyn = if cfg!(has_dyn_trait) { ++ quote! { &dyn } ++ } else { ++ quote! { & } ++ }; ++ ++ let cause_body = s.each_variant(|v| { ++ if let Some(cause) = v.bindings().iter().find(is_cause) { ++ quote!(return Some(#cause)) ++ } else { ++ quote!(return None) ++ } ++ }); ++ ++ let bt_body = s.each_variant(|v| { ++ if let Some(bi) = v.bindings().iter().find(is_backtrace) { ++ quote!(return Some(#bi)) ++ } else { ++ quote!(return None) ++ } ++ }); ++ ++ let fail = s.unbound_impl( ++ quote!(::failure::Fail), ++ quote! { ++ #[allow(unreachable_code)] ++ fn cause(&self) -> ::failure::_core::option::Option<#make_dyn(::failure::Fail)> { ++ match *self { #cause_body } ++ None ++ } ++ ++ #[allow(unreachable_code)] ++ fn backtrace(&self) -> ::failure::_core::option::Option<&::failure::Backtrace> { ++ match *self { #bt_body } ++ None ++ } ++ }, ++ ); ++ let display = display_body(&s).map(|display_body| { ++ s.unbound_impl( ++ quote!(::failure::_core::fmt::Display), ++ quote! { ++ #[allow(unreachable_code)] ++ fn fmt(&self, f: &mut ::failure::_core::fmt::Formatter) -> ::failure::_core::fmt::Result { ++ match *self { #display_body } ++ write!(f, "An error has occurred.") ++ } ++ }, ++ ) ++ }); ++ ++ (quote! 
{
++        #fail
++        #display
++    }).into()
++}
++
++fn display_body(s: &synstructure::Structure) -> Option<TokenStream> {
++    let mut msgs = s.variants().iter().map(|v| find_error_msg(&v.ast().attrs));
++    if msgs.all(|msg| msg.is_none()) {
++        return None;
++    }
++
++    Some(s.each_variant(|v| {
++        let msg =
++            find_error_msg(&v.ast().attrs).expect("All variants must have display attribute.");
++        if msg.nested.is_empty() {
++            panic!("Expected at least one argument to fail attribute");
++        }
++
++        let format_string = match msg.nested[0] {
++            syn::NestedMeta::Meta(syn::Meta::NameValue(ref nv)) if nv.ident == "display" => {
++                nv.lit.clone()
++            }
++            _ => {
++                panic!("Fail attribute must begin `display = \"\"` to control the Display message.")
++            }
++        };
++        let args = msg.nested.iter().skip(1).map(|arg| match *arg {
++            syn::NestedMeta::Literal(syn::Lit::Int(ref i)) => {
++                let bi = &v.bindings()[i.value() as usize];
++                quote!(#bi)
++            }
++            syn::NestedMeta::Meta(syn::Meta::Word(ref id)) => {
++                let id_s = id.to_string();
++                if id_s.starts_with("_") {
++                    if let Ok(idx) = id_s[1..].parse::<usize>() {
++                        let bi = match v.bindings().get(idx) {
++                            Some(bi) => bi,
++                            None => {
++                                panic!(
++                                    "display attempted to access field `{}` in `{}::{}` which \
++                                     does not exist (there are {} field{})",
++                                    idx,
++                                    s.ast().ident,
++                                    v.ast().ident,
++                                    v.bindings().len(),
++                                    if v.bindings().len() != 1 { "s" } else { "" }
++                                );
++                            }
++                        };
++                        return quote!(#bi);
++                    }
++                }
++                for bi in v.bindings() {
++                    if bi.ast().ident.as_ref() == Some(id) {
++                        return quote!(#bi);
++                    }
++                }
++                panic!(
++                    "Couldn't find field `{}` in `{}::{}`",
++                    id,
++                    s.ast().ident,
++                    v.ast().ident
++                );
++            }
++            _ => panic!("Invalid argument to fail attribute!"),
++        });
++
++        quote! {
++            return write!(f, #format_string #(, #args)*)
++        }
++    }))
++}
++
++fn find_error_msg(attrs: &[syn::Attribute]) -> Option<syn::MetaList> {
++    let mut error_msg = None;
++    for attr in attrs {
++        if let Some(meta) = attr.interpret_meta() {
++            if meta.name() == "fail" {
++                if error_msg.is_some() {
++                    panic!("Cannot have two display attributes")
++                } else {
++                    if let syn::Meta::List(list) = meta {
++                        error_msg = Some(list);
++                    } else {
++                        panic!("fail attribute must take a list in parentheses")
++                    }
++                }
++            }
++        }
++    }
++    error_msg
++}
++
++fn is_backtrace(bi: &&synstructure::BindingInfo) -> bool {
++    match bi.ast().ty {
++        syn::Type::Path(syn::TypePath {
++            qself: None,
++            path: syn::Path {
++                segments: ref path, ..
++            },
++        }) => path.last().map_or(false, |s| {
++            s.value().ident == "Backtrace" && s.value().arguments.is_empty()
++        }),
++        _ => false,
++    }
++}
++
++fn is_cause(bi: &&synstructure::BindingInfo) -> bool {
++    let mut found_cause = false;
++    for attr in &bi.ast().attrs {
++        if let Some(meta) = attr.interpret_meta() {
++            if meta.name() == "cause" {
++                if found_cause {
++                    panic!("Cannot have two `cause` attributes");
++                }
++                found_cause = true;
++            }
++            if meta.name() == "fail" {
++                if let syn::Meta::List(ref list) = meta {
++                    if let Some(ref pair) = list.nested.first() {
++                        if let &&syn::NestedMeta::Meta(syn::Meta::Word(ref word)) = pair.value() {
++                            if word == "cause" {
++                                if found_cause {
++                                    panic!("Cannot have two `cause` attributes");
++                                }
++                                found_cause = true;
++                            }
++                        }
++                    }
++                }
++            }
++        }
++    }
++    found_cause
++}
diff --cc vendor/failure_derive-0.1.2/tests/backtrace.rs
index 000000000,000000000..a30718411
new file mode 100644
--- /dev/null
+++ b/vendor/failure_derive-0.1.2/tests/backtrace.rs
@@@ -1,0 -1,0 +1,64 @@@
++extern crate failure;
++#[macro_use]
++extern crate failure_derive;
++
++use failure::{Backtrace, Fail};
++
++#[derive(Fail, Debug)]
++#[fail(display = "Error code: {}", code)]
++struct BacktraceError {
++    backtrace: Backtrace,
++    code: u32,
++}
++
++#[test]
++fn backtrace_error() {
++    let err = BacktraceError {
++        backtrace: Backtrace::new(),
++        code: 7,
++    };
++    let s = format!("{}", err);
++    assert_eq!(&s[..], "Error code: 7");
++    assert!(err.backtrace().is_some());
++}
++
++#[derive(Fail, Debug)]
++#[fail(display = "An error has occurred.")]
++struct BacktraceTupleError(Backtrace);
++
++#[test]
++fn backtrace_tuple_error() {
++    let err = BacktraceTupleError(Backtrace::new());
++    let s = format!("{}", err);
++    assert_eq!(&s[..], "An error has occurred.");
++    assert!(err.backtrace().is_some());
++}
++
++#[derive(Fail, Debug)]
++enum BacktraceEnumError {
++    #[fail(display = "Error code: {}", code)]
++    StructVariant { code: i32, backtrace: Backtrace },
++    #[fail(display = "Error: {}", _0)]
++    TupleVariant(&'static str, Backtrace),
++    #[fail(display = "An error has occurred.")]
++    UnitVariant,
++}
++
++#[test]
++fn backtrace_enum_error() {
++    let err = BacktraceEnumError::StructVariant {
++        code: 2,
++        backtrace: Backtrace::new(),
++    };
++    let s = format!("{}", err);
++    assert_eq!(&s[..], "Error code: 2");
++    assert!(err.backtrace().is_some());
++    let err = BacktraceEnumError::TupleVariant("foobar", Backtrace::new());
++    let s = format!("{}", err);
++    assert_eq!(&s[..], "Error: foobar");
++    assert!(err.backtrace().is_some());
++    let err = BacktraceEnumError::UnitVariant;
++    let s = format!("{}", err);
++    assert_eq!(&s[..], "An error has occurred.");
++    assert!(err.backtrace().is_none());
++}
diff --cc vendor/failure_derive-0.1.2/tests/custom_type_bounds.rs
index 000000000,000000000..fd1c8b975
new file mode 100644
--- /dev/null
+++ b/vendor/failure_derive-0.1.2/tests/custom_type_bounds.rs
@@@ -1,0 -1,0 +1,45 @@@
++#[macro_use]
++extern crate failure;
++
++use std::fmt::Debug;
++
++use failure::Fail;
++
++#[derive(Debug, Fail)]
++#[fail(display = "An error has occurred.")]
++pub struct UnboundedGenericTupleError<T: 'static + Debug + Send + Sync>(T);
++
++#[test]
++fn unbounded_generic_tuple_error() {
++    let s = format!("{}", UnboundedGenericTupleError(()));
++    assert_eq!(&s[..], "An error has occurred.");
++}
++
++#[derive(Debug, Fail)]
++#[fail(display = "An error has occurred: {}", _0)]
++pub struct FailBoundsGenericTupleError<T: Fail>(T);
++
++#[test]
++fn fail_bounds_generic_tuple_error() {
++    let error = FailBoundsGenericTupleError(UnboundedGenericTupleError(()));
++    let s = format!("{}", error);
++    assert_eq!(&s[..], "An error has occurred: An error has occurred.");
++}
++
++pub trait NoDisplay: 'static + Debug + Send + Sync {}
++
++impl NoDisplay for &'static str {}
++
++#[derive(Debug, Fail)]
++#[fail(display = "An error has occurred: {:?}", _0)]
++pub struct CustomBoundsGenericTupleError<T: NoDisplay>(T);
++
++#[test]
++fn custom_bounds_generic_tuple_error() {
++    let error = CustomBoundsGenericTupleError("more details unavailable.");
++    let s = format!("{}", error);
++    assert_eq!(
++        &s[..],
++        "An error has occurred: \"more details unavailable.\""
++    );
++}
diff --cc vendor/failure_derive-0.1.2/tests/no_derive_display.rs
index 000000000,000000000..20eeb308c
new file mode 100644
--- /dev/null
+++ b/vendor/failure_derive-0.1.2/tests/no_derive_display.rs
@@@ -1,0 -1,0 +1,21 @@@
++extern crate failure;
++#[macro_use]
++extern crate failure_derive;
++
++use failure::Fail;
++use std::fmt::{self, Display};
++
++#[derive(Debug, Fail)]
++struct Foo;
++
++impl Display for Foo {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.write_str("An error occurred.")
++    }
++}
++
++#[test]
++fn handwritten_display() {
++    assert!(Foo.cause().is_none());
++    assert_eq!(&format!("{}", Foo)[..], "An error occurred.");
++}
diff --cc vendor/failure_derive-0.1.2/tests/tests.rs
index 000000000,000000000..4e73255ef
new file mode 100644
--- /dev/null
+++ b/vendor/failure_derive-0.1.2/tests/tests.rs
@@@ -1,0 -1,0 +1,55 @@@
++extern crate failure;
++#[macro_use]
++extern crate failure_derive;
++
++#[derive(Fail, Debug)]
++#[fail(display = "An error has occurred.")]
++struct UnitError;
++
++#[test]
++fn unit_struct() {
++    let s = format!("{}", UnitError);
++    assert_eq!(&s[..], "An error has occurred.");
++}
++
++#[derive(Fail, Debug)]
++#[fail(display = "Error code: {}", code)]
++struct RecordError {
++    code: u32,
++}
++
++#[test]
++fn record_struct() {
++    let s = format!("{}", RecordError { code: 0 });
++    assert_eq!(&s[..], "Error code: 0");
++}
++
++#[derive(Fail, Debug)]
++#[fail(display = "Error code: {}", _0)]
++struct TupleError(i32);
++
++#[test]
++fn tuple_struct() {
++    let s = format!("{}", TupleError(2));
++    assert_eq!(&s[..], "Error code: 2");
++}
++
++#[derive(Fail, Debug)]
++enum EnumError {
++    #[fail(display = "Error code: {}", code)]
++    StructVariant { code: i32 },
++    #[fail(display = "Error: {}", _0)]
++    TupleVariant(&'static str),
++    #[fail(display = "An error has occurred.")]
++    UnitVariant,
++}
++
++#[test]
++fn enum_error() {
++    let s = format!("{}", EnumError::StructVariant { code: 2 });
++    assert_eq!(&s[..], "Error code: 2");
++    let s = format!("{}", EnumError::TupleVariant("foobar"));
++    assert_eq!(&s[..], "Error: foobar");
++    let s = format!("{}", EnumError::UnitVariant);
++    assert_eq!(&s[..], "An error has occurred.");
++}
diff --cc vendor/failure_derive-0.1.2/tests/wraps.rs
index 000000000,000000000..b33b6da95
new file mode 100644
--- /dev/null
+++ b/vendor/failure_derive-0.1.2/tests/wraps.rs
@@@ -1,0 -1,0 +1,99 @@@
++extern crate failure;
++#[macro_use]
++extern crate failure_derive;
++
++use std::fmt;
++use std::io;
++
++use failure::{Backtrace, Fail};
++
++#[derive(Fail, Debug)]
++#[fail(display = "An error has occurred: {}", inner)]
++struct WrapError {
++    #[fail(cause)]
++    inner: io::Error,
++}
++
++#[test]
++fn wrap_error() {
++    let inner = io::Error::from_raw_os_error(98);
++    let err = WrapError { inner };
++    assert!(
++        err.cause()
++            .and_then(|err| err.downcast_ref::<io::Error>())
++            .is_some()
++    );
++}
++
++#[derive(Fail, Debug)]
++#[fail(display = "An error has occurred: {}", _0)]
++struct WrapTupleError(#[fail(cause)] io::Error);
++
++#[test]
++fn wrap_tuple_error() {
++    let io_error = io::Error::from_raw_os_error(98);
++    let err: WrapTupleError = WrapTupleError(io_error);
++    assert!(
++        err.cause()
++            .and_then(|err| err.downcast_ref::<io::Error>())
++            .is_some()
++    );
++}
++
++#[derive(Fail, Debug)]
++#[fail(display = "An error has occurred: {}", inner)]
++struct WrapBacktraceError {
++    #[fail(cause)]
++    inner: io::Error,
++    backtrace: Backtrace,
++}
++
++#[test]
++fn wrap_backtrace_error() {
++    let inner = io::Error::from_raw_os_error(98);
++    let err: WrapBacktraceError = WrapBacktraceError {
++        inner,
++        backtrace: Backtrace::new(),
++    };
++    assert!(
++        err.cause()
++            .and_then(|err| err.downcast_ref::<io::Error>())
++            .is_some()
++    );
++    assert!(err.backtrace().is_some());
++}
++
++#[derive(Fail, Debug)]
++enum WrapEnumError {
++    #[fail(display = "An error has occurred: {}", _0)]
++    Io(#[fail(cause)] io::Error),
++    #[fail(display = "An error has occurred: {}", inner)]
++    Fmt {
++        #[fail(cause)]
++        inner: fmt::Error,
++        backtrace: Backtrace,
++    },
++}
++
++#[test]
++fn wrap_enum_error() {
++    let io_error = io::Error::from_raw_os_error(98);
++    let err: WrapEnumError = WrapEnumError::Io(io_error);
++    assert!(
++        err.cause()
++            .and_then(|err| err.downcast_ref::<io::Error>())
++            .is_some()
++    );
++    assert!(err.backtrace().is_none());
++    let fmt_error = fmt::Error::default();
++    let err: WrapEnumError = WrapEnumError::Fmt {
++        inner: fmt_error,
++        backtrace: Backtrace::new(),
++    };
++    assert!(
++        err.cause()
++            .and_then(|err| err.downcast_ref::<fmt::Error>())
++            .is_some()
++    );
++    assert!(err.backtrace().is_some());
++}
diff --cc vendor/flate2-1.0.2/.cargo-checksum.json
index 000000000,000000000..37121449f
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/.cargo-checksum.json
@@@ -1,0 -1,0 +1,1 @@@
++{"files":{},"package":"37847f133aae7acf82bb9577ccd8bda241df836787642654286e79679826a54b"}
diff --cc vendor/flate2-1.0.2/.travis.yml
index 000000000,000000000..6496af106
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/.travis.yml
@@@ -1,0 -1,0 +1,49 @@@
++language: rust
++sudo: false
++
++matrix:
++  include:
++    - rust: stable
++    - os: osx
++    - rust: beta
++    - rust: nightly
++
++    - rust: stable
++      script: cargo run --manifest-path systest/Cargo.toml
++
++    - rust: stable
++      env: RUST_BACKEND=1
++      script:
++        - cargo test --features rust_backend
++        - cargo test --features rust_backend --no-default-features
++
++    - rust: nightly
++      before_script:
++        - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
++      script:
++        - cargo doc --no-deps --all-features
++      after_success:
++        - travis-cargo --only nightly doc-upload
++
++  allow_failures:
++    - env: RUST_BACKEND=1
++
++script:
++  - cargo build
++  - rustdoc --test README.md -L target/debug/deps --extern flate2=target/debug/libflate2.rlib
++  - cargo test
++  - cargo test --features zlib
++  - cargo test --features tokio
++  - cargo test --features 'tokio zlib'
++  - cargo test --features zlib --no-default-features
++  - cargo clean && cargo build
++  - cargo doc --no-deps
++  - cargo doc --no-deps --manifest-path=miniz-sys/Cargo.toml
++
++env:
++  global:
++    secure: "PHVT7IaeP5nQQVwGHKwqCYBDp0QyetSlER7se2j2Xgfx+lw3Bu6VWH6VF04B636Gb0tHPN/sUCXSgGRcvDuy6XFOev4LfynoYxNKgHJYg2E34EP2QLwsFfnvE4iujaG3GJk3o935Y7OYGv2OP1HeG4Mv6JhQK0GLnNDBZQ65kWI="
++
++notifications:
++  email:
++    on_success: never
diff --cc vendor/flate2-1.0.2/Cargo.toml
index
000000000,000000000..088e8ed5d new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/Cargo.toml @@@ -1,0 -1,0 +1,67 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "flate2" ++version = "1.0.2" ++authors = ["Alex Crichton "] ++description = "Bindings to miniz.c for DEFLATE compression and decompression exposed as\nReader/Writer streams. Contains bindings for zlib, deflate, and gzip-based\nstreams.\n" ++homepage = "https://github.com/alexcrichton/flate2-rs" ++documentation = "https://docs.rs/flate2" ++readme = "README.md" ++keywords = ["gzip", "flate", "zlib", "encoding"] ++categories = ["compression", "api-bindings"] ++license = "MIT/Apache-2.0" ++repository = "https://github.com/alexcrichton/flate2-rs" ++[dependencies.futures] ++version = "0.1" ++optional = true ++ ++[dependencies.libc] ++version = "0.2" ++ ++[dependencies.libz-sys] ++version = "1.0" ++optional = true ++ ++[dependencies.miniz-sys] ++version = "0.1.7" ++optional = true ++ ++[dependencies.miniz_oxide_c_api] ++version = "0.1" ++features = ["no_c_export"] ++optional = true ++ ++[dependencies.tokio-io] ++version = "0.1" ++optional = true ++[dev-dependencies.quickcheck] ++version = "0.6" ++default-features = false ++ ++[dev-dependencies.rand] ++version = "0.5" ++ ++[dev-dependencies.tokio-core] ++version = "0.1" ++ ++[features] ++default = ["miniz-sys"] ++rust_backend = ["miniz_oxide_c_api"] ++tokio = ["tokio-io", "futures"] ++zlib = ["libz-sys"] ++[badges.appveyor] ++repository = "alexcrichton/flate2-rs" ++ ++[badges.travis-ci] ++repository = "alexcrichton/flate2-rs" diff --cc vendor/flate2-1.0.2/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. 
++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. ++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. 
You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. 
In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. diff --cc vendor/flate2-1.0.2/LICENSE-MIT index 000000000,000000000..39e0ed660 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/LICENSE-MIT @@@ -1,0 -1,0 +1,25 @@@ ++Copyright (c) 2014 Alex Crichton ++ ++Permission is hereby granted, free of charge, to any ++person obtaining a copy of this software and associated ++documentation files (the "Software"), to deal in the ++Software without restriction, including without ++limitation the rights to use, copy, modify, merge, ++publish, distribute, sublicense, and/or sell copies of ++the Software, and to permit persons to whom the Software ++is furnished to do so, subject to the following ++conditions: ++ ++The above copyright notice and this permission notice ++shall be included in all copies or substantial portions ++of the Software. 
++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED ++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A ++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT ++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR ++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++DEALINGS IN THE SOFTWARE. diff --cc vendor/flate2-1.0.2/README.md index 000000000,000000000..8b403e53c new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/README.md @@@ -1,0 -1,0 +1,90 @@@ ++# flate2 ++ ++[![Build Status](https://travis-ci.org/alexcrichton/flate2-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/flate2-rs) ++[![Build status](https://ci.appveyor.com/api/projects/status/9tatexq47i3ee13k?svg=true)](https://ci.appveyor.com/project/alexcrichton/flate2-rs) ++[![Crates.io](https://img.shields.io/crates/v/flate2.svg?maxAge=2592000)](https://crates.io/crates/flate2) ++[![Documentation](https://docs.rs/flate2/badge.svg)](https://docs.rs/flate2) ++ ++A streaming compression/decompression library for Rust. The underlying ++implementation by default uses [`miniz`](https://github.com/richgel999/miniz) but ++can optionally be configured to use the system zlib, if available. ++ ++There is also an experimental rust backend that uses the ++[`miniz_oxide`](https://crates.io/crates/miniz_oxide) crate. This avoids the need ++to build C code, but hasn't gone through as much testing as the other backends. ++ ++Supported formats: ++ ++* deflate ++* zlib ++* gzip ++ ++```toml ++# Cargo.toml ++[dependencies] ++flate2 = "1.0" ++``` ++ ++Using zlib instead of miniz: ++ ++```toml ++[dependencies] ++flate2 = { version = "1.0", features = ["zlib"], default-features = false } ++``` ++ ++Using the rust back-end: ++ ++```toml ++[dependencies] ++flate2 = { version = "1.0", features = ["rust_backend"], default-features = false } ++``` ++ ++## Compression ++ ++```rust ++extern crate flate2; ++ ++use std::io::prelude::*; ++use flate2::Compression; ++use flate2::write::ZlibEncoder; ++ ++fn main() { ++ let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); ++ e.write_all(b"foo"); ++ e.write_all(b"bar"); ++ let compressed_bytes = e.finish(); ++} ++``` ++ ++## Decompression ++ ++```rust,no_run ++extern crate flate2; ++ ++use std::io::prelude::*; ++use flate2::read::GzDecoder; ++ ++fn main() { ++ let mut d = GzDecoder::new("...".as_bytes()); ++ let mut s = String::new(); ++ d.read_to_string(&mut s).unwrap(); ++ println!("{}", s); ++} ++``` ++ ++# License ++ ++This project is licensed under either of ++ ++ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or ++ http://www.apache.org/licenses/LICENSE-2.0) ++ * MIT license ([LICENSE-MIT](LICENSE-MIT) or ++ http://opensource.org/licenses/MIT) ++ ++at your option. ++ ++### Contribution ++ ++Unless you explicitly state otherwise, any contribution intentionally submitted ++for inclusion in this project by you, as defined in the Apache-2.0 license, ++shall be dual licensed as above, without any additional terms or conditions. 
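The Compression and Decompression snippets in the README above are written independently, but they compose into a single round trip. A minimal sketch using the same flate2 1.0 API the README shows (illustrative only; this is not part of the upstream file):

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::read::GzDecoder;
use flate2::write::GzEncoder;

fn main() {
    // Compress with the writer-based encoder...
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(b"Hello World").unwrap();
    let compressed = encoder.finish().unwrap();

    // ...then decompress with the reader-based decoder and compare.
    let mut decoder = GzDecoder::new(&compressed[..]);
    let mut decompressed = String::new();
    decoder.read_to_string(&mut decompressed).unwrap();
    assert_eq!(decompressed, "Hello World");
}
```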
diff --cc vendor/flate2-1.0.2/appveyor.yml
index 000000000,000000000..0a140f7bc
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/appveyor.yml
@@@ -1,0 -1,0 +1,24 @@@
++environment:
++  matrix:
++  - TARGET: x86_64-pc-windows-msvc
++  - TARGET: x86_64-pc-windows-gnu
++  - TARGET: i686-pc-windows-msvc
++  - TARGET: i686-pc-windows-gnu
++install:
++  - ps: >-
++      If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
++        $Env:PATH += ';C:\msys64\mingw64\bin'
++      } ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
++        $Env:PATH += ';C:\MinGW\bin'
++      }
++  - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe"
++  - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust"
++  - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin
++  - rustc -V
++  - cargo -V
++
++build: false
++
++test_script:
++  - cargo test --verbose --target %TARGET%
++  - cargo test --verbose --target %TARGET% --features tokio
diff --cc vendor/flate2-1.0.2/examples/deflatedecoder-bufread.rs
index 000000000,000000000..24bd5e668
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/deflatedecoder-bufread.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::DeflateEncoder;
++use flate2::bufread::DeflateDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Deflate Encoded vector of bytes and returns a string or error
++// Here &[u8] implements BufRead
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut deflater = DeflateDecoder::new(&bytes[..]);
++    let mut s = String::new();
++    deflater.read_to_string(&mut s)?;
++    Ok(s)
++}
diff --cc vendor/flate2-1.0.2/examples/deflatedecoder-read.rs
index 000000000,000000000..87ac7e987
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/deflatedecoder-read.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::DeflateEncoder;
++use flate2::read::DeflateDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Deflate Encoded vector of bytes and returns a string or error
++// Here &[u8] implements Read
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut deflater = DeflateDecoder::new(&bytes[..]);
++    let mut s = String::new();
++    deflater.read_to_string(&mut s)?;
++    Ok(s)
++}
diff --cc vendor/flate2-1.0.2/examples/deflatedecoder-write.rs
index 000000000,000000000..14226324c
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/deflatedecoder-write.rs
@@@ -1,0 -1,0 +1,26 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::DeflateEncoder;
++use flate2::write::DeflateDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Deflate Encoded vector of bytes and returns a string or error
++// Here Vec<u8> implements Write
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut writer = Vec::new();
++    let mut deflater = DeflateDecoder::new(writer);
++    deflater.write_all(&bytes[..])?;
++    writer = deflater.finish()?;
++    let return_string = String::from_utf8(writer).expect("String parsing error");
++    Ok(return_string)
++}
diff --cc vendor/flate2-1.0.2/examples/deflateencoder-bufread.rs
index 000000000,000000000..abdebbb07
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/deflateencoder-bufread.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::bufread::DeflateEncoder;
++use std::fs::File;
++use std::io::BufReader;
++
++// Open file and debug print the contents compressed with Deflate
++fn main() {
++    println!("{:?}", open_hello_world().unwrap());
++}
++
++// Opens sample file, compresses the contents and returns a Vector or error
++// File wrapped in a BufReader implements BufRead
++fn open_hello_world() -> io::Result<Vec<u8>> {
++    let f = File::open("examples/hello_world.txt")?;
++    let b = BufReader::new(f);
++    let mut deflater = DeflateEncoder::new(b, Compression::fast());
++    let mut buffer = Vec::new();
++    deflater.read_to_end(&mut buffer)?;
++    Ok(buffer)
++}
diff --cc vendor/flate2-1.0.2/examples/deflateencoder-read.rs
index 000000000,000000000..7937af3fc
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/deflateencoder-read.rs
@@@ -1,0 -1,0 +1,20 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::read::DeflateEncoder;
++
++// Print the Deflate compressed representation of hello world
++fn main() {
++    println!("{:?}", deflateencoder_read_hello_world().unwrap());
++}
++
++// Return a vector containing the Deflate compressed version of hello world
++fn deflateencoder_read_hello_world() -> io::Result<Vec<u8>> {
++    let mut ret_vec = [0; 100];
++    let c = b"hello world";
++    let mut deflater = DeflateEncoder::new(&c[..], Compression::fast());
++    let count = deflater.read(&mut ret_vec)?;
++    Ok(ret_vec[0..count].to_vec())
++}
diff --cc vendor/flate2-1.0.2/examples/deflateencoder-write.rs
index 000000000,000000000..830429308
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/deflateencoder-write.rs
@@@ -1,0 -1,0 +1,12 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use flate2::Compression;
++use flate2::write::DeflateEncoder;
++
++// Vec<u8> implements Write to print the compressed bytes of sample string
++fn main() {
++    let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    println!("{:?}", e.finish().unwrap());
++}
diff --cc vendor/flate2-1.0.2/examples/gzbuilder.rs
index 000000000,000000000..4b3c9c597
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzbuilder.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use std::fs::File;
++use flate2::GzBuilder;
++use flate2::Compression;
++
++// Open file and debug print the contents compressed with gzip
++fn main() {
++    sample_builder().unwrap();
++}
++
++// GzBuilder opens a file and writes a sample string using Builder pattern
++fn sample_builder() -> Result<(), io::Error> {
++    let f = File::create("examples/hello_world.gz")?;
++    let mut gz = GzBuilder::new()
++        .filename("hello_world.txt")
++        .comment("test file, please delete")
++        .write(f, Compression::default());
++    gz.write_all(b"hello world")?;
++    gz.finish()?;
++    Ok(())
++}
diff --cc vendor/flate2-1.0.2/examples/gzdecoder-bufread.rs
index 000000000,000000000..268792425
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzdecoder-bufread.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::GzEncoder;
++use flate2::bufread::GzDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = GzEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Gz Encoded vector of bytes and returns a string or error
++// Here &[u8] implements BufRead
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut gz = GzDecoder::new(&bytes[..]);
++    let mut s = String::new();
++    gz.read_to_string(&mut s)?;
++    Ok(s)
++}
diff --cc vendor/flate2-1.0.2/examples/gzdecoder-read.rs
index 000000000,000000000..1ab0ecb6c
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzdecoder-read.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::GzEncoder;
++use flate2::read::GzDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = GzEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Gz Encoded vector of bytes and returns a string or error
++// Here &[u8] implements Read
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut gz = GzDecoder::new(&bytes[..]);
++    let mut s = String::new();
++    gz.read_to_string(&mut s)?;
++    Ok(s)
++}
diff --cc vendor/flate2-1.0.2/examples/gzdecoder-write.rs
index 000000000,000000000..fab71e527
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzdecoder-write.rs
@@@ -1,0 -1,0 +1,26 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::{GzEncoder, GzDecoder};
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = GzEncoder::new(Vec::new(), Compression::default());
++    e.write(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_writer(bytes).unwrap());
++}
++
++// Uncompresses a Gz Encoded vector of bytes and returns a string or error
++// Here Vec<u8> implements Write
++fn decode_writer(bytes: Vec<u8>) -> io::Result<String> {
++    let mut writer = Vec::new();
++    let mut decoder = GzDecoder::new(writer);
++    decoder.write(&bytes[..])?;
++    decoder.try_finish()?;
++    writer = decoder.finish()?;
++    let return_string = String::from_utf8(writer).expect("String parsing error");
++    Ok(return_string)
++}
diff --cc vendor/flate2-1.0.2/examples/gzencoder-bufread.rs
index 000000000,000000000..3d57cd1d5
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzencoder-bufread.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::bufread::GzEncoder;
++use std::fs::File;
++use std::io::BufReader;
++
++// Open file and debug print the contents compressed with gzip
++fn main() {
++    println!("{:?}", open_hello_world().unwrap());
++}
++
++// Opens sample file, compresses the contents and returns a Vector or error
++// File wrapped in a BufReader implements BufRead
++fn open_hello_world() -> io::Result<Vec<u8>> {
++    let f = File::open("examples/hello_world.txt")?;
++    let b = BufReader::new(f);
++    let mut gz = GzEncoder::new(b, Compression::fast());
++    let mut buffer = Vec::new();
++    gz.read_to_end(&mut buffer)?;
++    Ok(buffer)
++}
diff --cc vendor/flate2-1.0.2/examples/gzencoder-read.rs
index 000000000,000000000..673296520
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzencoder-read.rs
@@@ -1,0 -1,0 +1,20 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::read::GzEncoder;
++
++// Print the GZ compressed representation of hello world
++fn main() {
++    println!("{:?}", gzencoder_read_hello_world().unwrap());
++}
++
++// Return a vector containing the GZ compressed version of hello world
++fn gzencoder_read_hello_world() -> io::Result<Vec<u8>> {
++    let mut ret_vec = [0; 100];
++    let c = b"hello world";
++    let mut z = GzEncoder::new(&c[..], Compression::fast());
++    let count = z.read(&mut ret_vec)?;
++    Ok(ret_vec[0..count].to_vec())
++}
diff --cc vendor/flate2-1.0.2/examples/gzencoder-write.rs
index 000000000,000000000..0bbe67d01
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzencoder-write.rs
@@@ -1,0 -1,0 +1,12 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use flate2::Compression;
++use flate2::write::GzEncoder;
++
++// Vec<u8> implements Write to print the compressed bytes of sample string
++fn main() {
++    let mut e = GzEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    println!("{:?}", e.finish().unwrap());
++}
diff --cc vendor/flate2-1.0.2/examples/gzmultidecoder-bufread.rs
index 000000000,000000000..037bbd9c3
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzmultidecoder-bufread.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::GzEncoder;
++use flate2::bufread::MultiGzDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = GzEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Gz Encoded vector of bytes and returns a string or error
++// Here &[u8] implements BufRead
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut gz = MultiGzDecoder::new(&bytes[..]);
++    let mut s = String::new();
++    gz.read_to_string(&mut s)?;
++    Ok(s)
++}
diff --cc vendor/flate2-1.0.2/examples/gzmultidecoder-read.rs
index 000000000,000000000..792331f28
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/gzmultidecoder-read.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::GzEncoder;
++use flate2::read::MultiGzDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = GzEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Gz Encoded vector of bytes and returns a string or error
++// Here &[u8] implements Read
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut gz = MultiGzDecoder::new(&bytes[..]);
++    let mut s = String::new();
++    gz.read_to_string(&mut s)?;
++    Ok(s)
++}
diff --cc vendor/flate2-1.0.2/examples/hello_world.txt
index 000000000,000000000..557db03de
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/hello_world.txt
@@@ -1,0 -1,0 +1,1 @@@
++Hello World
diff --cc vendor/flate2-1.0.2/examples/zlibdecoder-bufread.rs
index 000000000,000000000..96fc84d37
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/zlibdecoder-bufread.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::ZlibEncoder;
++use flate2::bufread::ZlibDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_bufreader(bytes).unwrap());
++}
++
++// Uncompresses a Zlib Encoded vector of bytes and returns a string or error
++// Here &[u8] implements BufRead
++fn decode_bufreader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut z = ZlibDecoder::new(&bytes[..]);
++    let mut s = String::new();
++    z.read_to_string(&mut s)?;
++    Ok(s)
++}
diff --cc vendor/flate2-1.0.2/examples/zlibdecoder-read.rs
index 000000000,000000000..106ae8f7e
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/zlibdecoder-read.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::ZlibEncoder;
++use flate2::read::ZlibDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Zlib Encoded vector of bytes and returns a string or error
++// Here &[u8] implements Read
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut z = ZlibDecoder::new(&bytes[..]);
++    let mut s = String::new();
++    z.read_to_string(&mut s)?;
++    Ok(s)
++}
diff --cc vendor/flate2-1.0.2/examples/zlibdecoder-write.rs
index 000000000,000000000..24aa60f7c
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/zlibdecoder-write.rs
@@@ -1,0 -1,0 +1,26 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::write::ZlibEncoder;
++use flate2::write::ZlibDecoder;
++
++// Compress a sample string and print it after transformation.
++fn main() {
++    let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
++    e.write_all(b"Hello World").unwrap();
++    let bytes = e.finish().unwrap();
++    println!("{}", decode_reader(bytes).unwrap());
++}
++
++// Uncompresses a Zlib Encoded vector of bytes and returns a string or error
++// Here Vec<u8> implements Write
++fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++    let mut writer = Vec::new();
++    let mut z = ZlibDecoder::new(writer);
++    z.write_all(&bytes[..])?;
++    writer = z.finish()?;
++    let return_string = String::from_utf8(writer).expect("String parsing error");
++    Ok(return_string)
++}
diff --cc vendor/flate2-1.0.2/examples/zlibencoder-bufread.rs
index 000000000,000000000..8b8d3d084
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/zlibencoder-bufread.rs
@@@ -1,0 -1,0 +1,24 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use std::io;
++use flate2::Compression;
++use flate2::bufread::ZlibEncoder;
++use std::fs::File;
++use std::io::BufReader;
++
++// Open file and debug print the contents compressed with zlib
++fn main() {
++    println!("{:?}", open_hello_world().unwrap());
++}
++
++// Opens sample file, compresses the contents and returns a Vector or error
++// File wrapped in a BufReader implements BufRead
++fn open_hello_world() -> io::Result<Vec<u8>> {
++    let f = File::open("examples/hello_world.txt")?;
++    let b = BufReader::new(f);
++    let mut z = ZlibEncoder::new(b, Compression::fast());
++    let mut buffer = Vec::new();
++    z.read_to_end(&mut buffer)?;
++    Ok(buffer)
++}
diff --cc vendor/flate2-1.0.2/examples/zlibencoder-read.rs
index 000000000,000000000..a7cd3410f
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/zlibencoder-read.rs
@@@ -1,0 -1,0 +1,21 @@@
++extern crate flate2;
++
++use std::io::prelude::*;
++use flate2::Compression;
++use flate2::read::ZlibEncoder;
++use std::fs::File;
++
++// Open file and debug print the compressed contents
++fn main() {
++    println!("{:?}", open_hello_world().unwrap());
++}
++
++// Opens sample file, compresses the contents and returns a Vector or error
++// File implements Read
++fn open_hello_world() -> std::io::Result<Vec<u8>> {
++    let f = File::open("examples/hello_world.txt")?;
++    let mut z = ZlibEncoder::new(f, Compression::fast());
++    let mut buffer = [0; 50];
++    let byte_count = z.read(&mut buffer)?;
++    Ok(buffer[0..byte_count].to_vec())
++}
diff --cc vendor/flate2-1.0.2/examples/zlibencoder-write.rs
index 000000000,000000000..35f704132
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/examples/zlibencoder-write.rs
@@@ -1,0 -1,0 +1,12 @@@
++extern crate flate2;
++
++use std::io::prelude::*; ++use flate2::Compression; ++use flate2::write::ZlibEncoder; ++ ++// Vec implements Write to print the compressed bytes of sample string ++fn main() { ++ let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); ++ e.write_all(b"Hello World").unwrap(); ++ println!("{:?}", e.finish().unwrap()); ++} diff --cc vendor/flate2-1.0.2/src/bufreader.rs index 000000000,000000000..9aa6a3ae9 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/bufreader.rs @@@ -1,0 -1,0 +1,104 @@@ ++// Copyright 2013 The Rust Project Developers. See the COPYRIGHT ++// file at the top-level directory of this distribution and at ++// http://rust-lang.org/COPYRIGHT. ++// ++// Licensed under the Apache License, Version 2.0 or the MIT license ++// , at your ++// option. This file may not be copied, modified, or distributed ++// except according to those terms. ++ ++use std::cmp; ++use std::io; ++use std::io::prelude::*; ++use std::mem; ++ ++pub struct BufReader { ++ inner: R, ++ buf: Box<[u8]>, ++ pos: usize, ++ cap: usize, ++} ++ ++impl ::std::fmt::Debug for BufReader ++where ++ R: ::std::fmt::Debug, ++{ ++ fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { ++ fmt.debug_struct("BufReader") ++ .field("reader", &self.inner) ++ .field( ++ "buffer", ++ &format_args!("{}/{}", self.cap - self.pos, self.buf.len()), ++ ) ++ .finish() ++ } ++} ++ ++impl BufReader { ++ pub fn new(inner: R) -> BufReader { ++ BufReader::with_buf(vec![0; 32 * 1024], inner) ++ } ++ ++ pub fn with_buf(buf: Vec, inner: R) -> BufReader { ++ BufReader { ++ inner: inner, ++ buf: buf.into_boxed_slice(), ++ pos: 0, ++ cap: 0, ++ } ++ } ++} ++ ++impl BufReader { ++ pub fn get_ref(&self) -> &R { ++ &self.inner ++ } ++ ++ pub fn get_mut(&mut self) -> &mut R { ++ &mut self.inner ++ } ++ ++ pub fn into_inner(self) -> R { ++ self.inner ++ } ++ ++ pub fn reset(&mut self, inner: R) -> R { ++ self.pos = 0; ++ self.cap = 0; ++ mem::replace(&mut self.inner, inner) ++ } ++} ++ ++impl Read for BufReader { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ // If we don't have any buffered data and we're doing a massive read ++ // (larger than our internal buffer), bypass our internal buffer ++ // entirely. ++ if self.pos == self.cap && buf.len() >= self.buf.len() { ++ return self.inner.read(buf); ++ } ++ let nread = { ++ let mut rem = self.fill_buf()?; ++ rem.read(buf)? ++ }; ++ self.consume(nread); ++ Ok(nread) ++ } ++} ++ ++impl BufRead for BufReader { ++ fn fill_buf(&mut self) -> io::Result<&[u8]> { ++ // If we've reached the end of our internal buffer then we need to fetch ++ // some more data from the underlying reader. ++ if self.pos == self.cap { ++ self.cap = self.inner.read(&mut self.buf)?; ++ self.pos = 0; ++ } ++ Ok(&self.buf[self.pos..self.cap]) ++ } ++ ++ fn consume(&mut self, amt: usize) { ++ self.pos = cmp::min(self.pos + amt, self.cap); ++ } ++} diff --cc vendor/flate2-1.0.2/src/crc.rs index 000000000,000000000..0d621a921 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/crc.rs @@@ -1,0 -1,0 +1,182 @@@ ++//! Simple CRC bindings backed by miniz.c ++ ++use std::io::prelude::*; ++use std::io; ++use libc; ++ ++use ffi; ++ ++/// The CRC calculated by a [`CrcReader`]. ++/// ++/// [`CrcReader`]: struct.CrcReader.html ++#[derive(Debug)] ++pub struct Crc { ++ crc: libc::c_ulong, ++ amt: u32, ++} ++ ++/// A wrapper around a [`Read`] that calculates the CRC. 
++/// A wrapper around a [`Read`] that calculates the CRC.
++///
++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
++#[derive(Debug)]
++pub struct CrcReader<R> {
++    inner: R,
++    crc: Crc,
++}
++
++impl Crc {
++    /// Create a new CRC.
++    pub fn new() -> Crc {
++        Crc { crc: 0, amt: 0 }
++    }
++
++    /// Returns the current CRC checksum.
++    pub fn sum(&self) -> u32 {
++        self.crc as u32
++    }
++
++    /// The number of bytes that have been used to calculate the CRC.
++    /// This value is only accurate if the amount is lower than 2^32.
++    pub fn amount(&self) -> u32 {
++        self.amt
++    }
++
++    /// Update the CRC with the bytes in `data`.
++    pub fn update(&mut self, data: &[u8]) {
++        self.amt = self.amt.wrapping_add(data.len() as u32);
++        self.crc = unsafe { ffi::mz_crc32(self.crc, data.as_ptr(), data.len() as libc::size_t) };
++    }
++
++    /// Reset the CRC.
++    pub fn reset(&mut self) {
++        self.crc = 0;
++        self.amt = 0;
++    }
++
++    /// Combine the CRC with the CRC for the subsequent block of bytes.
++    pub fn combine(&mut self, additional_crc: &Crc) {
++        self.crc = unsafe {
++            ffi::mz_crc32_combine(
++                self.crc as ::libc::c_ulong,
++                additional_crc.crc as ::libc::c_ulong,
++                additional_crc.amt as ::libc::off_t,
++            )
++        };
++        self.amt += additional_crc.amt;
++    }
++}
++
++impl<R: Read> CrcReader<R> {
++    /// Create a new CrcReader.
++    pub fn new(r: R) -> CrcReader<R> {
++        CrcReader {
++            inner: r,
++            crc: Crc::new(),
++        }
++    }
++}
++
++impl<R> CrcReader<R> {
++    /// Get the Crc for this CrcReader.
++    pub fn crc(&self) -> &Crc {
++        &self.crc
++    }
++
++    /// Get the reader that is wrapped by this CrcReader.
++    pub fn into_inner(self) -> R {
++        self.inner
++    }
++
++    /// Get the reader that is wrapped by this CrcReader by reference.
++    pub fn get_ref(&self) -> &R {
++        &self.inner
++    }
++
++    /// Get a mutable reference to the reader that is wrapped by this CrcReader.
++    pub fn get_mut(&mut self) -> &mut R {
++        &mut self.inner
++    }
++
++    /// Reset the Crc in this CrcReader.
++    pub fn reset(&mut self) {
++        self.crc.reset();
++    }
++}
++
++impl<R: Read> Read for CrcReader<R> {
++    fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
++        let amt = self.inner.read(into)?;
++        self.crc.update(&into[..amt]);
++        Ok(amt)
++    }
++}
++
++impl<R: BufRead> BufRead for CrcReader<R> {
++    fn fill_buf(&mut self) -> io::Result<&[u8]> {
++        self.inner.fill_buf()
++    }
++    fn consume(&mut self, amt: usize) {
++        if let Ok(data) = self.inner.fill_buf() {
++            self.crc.update(&data[..amt]);
++        }
++        self.inner.consume(amt);
++    }
++}
++
++/// A wrapper around a [`Write`] that calculates the CRC.
++///
++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
++#[derive(Debug)]
++pub struct CrcWriter<W> {
++    inner: W,
++    crc: Crc,
++}
++
++impl<W> CrcWriter<W> {
++    /// Get the Crc for this CrcWriter.
++    pub fn crc(&self) -> &Crc {
++        &self.crc
++    }
++
++    /// Get the writer that is wrapped by this CrcWriter.
++    pub fn into_inner(self) -> W {
++        self.inner
++    }
++
++    /// Get the writer that is wrapped by this CrcWriter by reference.
++    pub fn get_ref(&self) -> &W {
++        &self.inner
++    }
++
++    /// Get a mutable reference to the writer that is wrapped by this CrcWriter.
++    pub fn get_mut(&mut self) -> &mut W {
++        &mut self.inner
++    }
++
++    /// Reset the Crc in this CrcWriter.
++    pub fn reset(&mut self) {
++        self.crc.reset();
++    }
++}
++
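A short sketch of `CrcReader` wrapping an in-memory reader, again assuming the crate-root re-export; the checksum updates transparently as bytes pass through `read`:

```
extern crate flate2;

use std::io::prelude::*;
use flate2::CrcReader;

// &[u8] implements Read, so the wrapper can checksum an in-memory
// buffer while the caller reads through it unchanged.
fn main() {
    let data: &[u8] = b"Hello World";
    let mut r = CrcReader::new(data);
    let mut out = Vec::new();
    r.read_to_end(&mut out).unwrap();
    println!("read {} bytes, crc32 = {:08x}", r.crc().amount(), r.crc().sum());
}
```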
++impl<W: Write> CrcWriter<W> {
++    /// Create a new CrcWriter.
++    pub fn new(w: W) -> CrcWriter<W> {
++        CrcWriter {
++            inner: w,
++            crc: Crc::new(),
++        }
++    }
++}
++
++impl<W: Write> Write for CrcWriter<W> {
++    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
++        let amt = self.inner.write(buf)?;
++        self.crc.update(&buf[..amt]);
++        Ok(amt)
++    }
++
++    fn flush(&mut self) -> io::Result<()> {
++        self.inner.flush()
++    }
++}
diff --cc vendor/flate2-1.0.2/src/deflate/bufread.rs
index 000000000,000000000..ab4dd2969
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/src/deflate/bufread.rs
@@@ -1,0 -1,0 +1,268 @@@
++use std::io::prelude::*;
++use std::io;
++use std::mem;
++
++#[cfg(feature = "tokio")]
++use futures::Poll;
++#[cfg(feature = "tokio")]
++use tokio_io::{AsyncRead, AsyncWrite};
++
++use zio;
++use {Compress, Decompress};
++
++/// A DEFLATE encoder, or compressor.
++///
++/// This structure implements a [`BufRead`] interface and will read uncompressed
++/// data from an underlying stream and emit a stream of compressed data.
++///
++/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html
++///
++/// # Examples
++///
++/// ```
++/// use std::io::prelude::*;
++/// use std::io;
++/// use flate2::Compression;
++/// use flate2::bufread::DeflateEncoder;
++/// use std::fs::File;
++/// use std::io::BufReader;
++///
++/// # fn main() {
++/// #     println!("{:?}", open_hello_world().unwrap());
++/// # }
++/// #
++/// // Opens sample file, compresses the contents and returns a Vector
++/// fn open_hello_world() -> io::Result<Vec<u8>> {
++///     let f = File::open("examples/hello_world.txt")?;
++///     let b = BufReader::new(f);
++///     let mut deflater = DeflateEncoder::new(b, Compression::fast());
++///     let mut buffer = Vec::new();
++///     deflater.read_to_end(&mut buffer)?;
++///     Ok(buffer)
++/// }
++/// ```
++#[derive(Debug)]
++pub struct DeflateEncoder<R> {
++    obj: R,
++    data: Compress,
++}
++
++impl<R: BufRead> DeflateEncoder<R> {
++    /// Creates a new encoder which will read uncompressed data from the given
++    /// stream and emit the compressed stream.
++    pub fn new(r: R, level: ::Compression) -> DeflateEncoder<R> {
++        DeflateEncoder {
++            obj: r,
++            data: Compress::new(level, false),
++        }
++    }
++}
++
++pub fn reset_encoder_data<R>(zlib: &mut DeflateEncoder<R>) {
++    zlib.data.reset();
++}
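Since `&[u8]` already implements `BufRead`, the bufread encoder above can also compress in-memory data directly, with no `BufReader` wrapper; a minimal sketch using only APIs defined in this module:

```
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::bufread::DeflateEncoder;

// Compress an in-memory buffer; total_in/total_out report the byte
// counts consumed and produced by the compressor.
fn main() {
    let data: &[u8] = b"Hello World Hello World Hello World";
    let mut e = DeflateEncoder::new(data, Compression::default());
    let mut compressed = Vec::new();
    e.read_to_end(&mut compressed).unwrap();
    println!("{} bytes in -> {} bytes out", e.total_in(), e.total_out());
}
```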
++impl<R> DeflateEncoder<R> {
++    /// Resets the state of this encoder entirely, swapping out the input
++    /// stream for another.
++    ///
++    /// This function will reset the internal state of this encoder and replace
++    /// the input stream with the one provided, returning the previous input
++    /// stream. Future data read from this encoder will be the compressed
++    /// version of `r`'s data.
++    pub fn reset(&mut self, r: R) -> R {
++        reset_encoder_data(self);
++        mem::replace(&mut self.obj, r)
++    }
++
++    /// Acquires a reference to the underlying reader
++    pub fn get_ref(&self) -> &R {
++        &self.obj
++    }
++
++    /// Acquires a mutable reference to the underlying stream
++    ///
++    /// Note that mutation of the stream may result in surprising results if
++    /// this encoder continues to be used.
++    pub fn get_mut(&mut self) -> &mut R {
++        &mut self.obj
++    }
++
++    /// Consumes this encoder, returning the underlying reader.
++    pub fn into_inner(self) -> R {
++        self.obj
++    }
++
++    /// Returns the number of bytes that have been read into this compressor.
++    ///
++    /// Note that not all bytes read from the underlying object may be accounted
++    /// for; there may still be some active buffering.
++    pub fn total_in(&self) -> u64 {
++        self.data.total_in()
++    }
++
++    /// Returns the number of bytes that the compressor has produced.
++    ///
++    /// Note that not all bytes may have been read yet; some may still be
++    /// buffered.
++    pub fn total_out(&self) -> u64 {
++        self.data.total_out()
++    }
++}
++
++impl<R: BufRead> Read for DeflateEncoder<R> {
++    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
++        zio::read(&mut self.obj, &mut self.data, buf)
++    }
++}
++
++#[cfg(feature = "tokio")]
++impl<R: AsyncRead + BufRead> AsyncRead for DeflateEncoder<R> {}
++
++impl<R: BufRead + Write> Write for DeflateEncoder<R> {
++    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
++        self.get_mut().write(buf)
++    }
++
++    fn flush(&mut self) -> io::Result<()> {
++        self.get_mut().flush()
++    }
++}
++
++#[cfg(feature = "tokio")]
++impl<R: AsyncWrite + BufRead> AsyncWrite for DeflateEncoder<R> {
++    fn shutdown(&mut self) -> Poll<(), io::Error> {
++        self.get_mut().shutdown()
++    }
++}
++
++/// A DEFLATE decoder, or decompressor.
++///
++/// This structure implements a [`BufRead`] interface and takes a stream of
++/// compressed data as input, providing the decompressed data when read from.
++///
++/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html
++///
++/// # Examples
++///
++/// ```
++/// use std::io::prelude::*;
++/// use std::io;
++/// # use flate2::Compression;
++/// # use flate2::write::DeflateEncoder;
++/// use flate2::bufread::DeflateDecoder;
++///
++/// # fn main() {
++/// #     let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
++/// #     e.write_all(b"Hello World").unwrap();
++/// #     let bytes = e.finish().unwrap();
++/// #     println!("{}", decode_reader(bytes).unwrap());
++/// # }
++/// // Uncompresses a DEFLATE-encoded vector of bytes and returns a string or error
++/// // Here &[u8] implements Read
++/// fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++///     let mut deflater = DeflateDecoder::new(&bytes[..]);
++///     let mut s = String::new();
++///     deflater.read_to_string(&mut s)?;
++///     Ok(s)
++/// }
++/// ```
++#[derive(Debug)]
++pub struct DeflateDecoder<R> {
++    obj: R,
++    data: Decompress,
++}
++
++pub fn reset_decoder_data<R>(zlib: &mut DeflateDecoder<R>) {
++    zlib.data = Decompress::new(false);
++}
++
++impl<R: BufRead> DeflateDecoder<R> {
++    /// Creates a new decoder which will decompress data read from the given
++    /// stream.
++    pub fn new(r: R) -> DeflateDecoder<R> {
++        DeflateDecoder {
++            obj: r,
++            data: Decompress::new(false),
++        }
++    }
++}
++
++impl<R> DeflateDecoder<R> {
++    /// Resets the state of this decoder entirely, swapping out the input
++    /// stream for another.
++    ///
++    /// This will reset the internal state of this decoder and replace the
++    /// input stream with the one provided, returning the previous input
++    /// stream. Future data read from this decoder will be the decompressed
++    /// version of `r`'s data.
++    pub fn reset(&mut self, r: R) -> R {
++        reset_decoder_data(self);
++        mem::replace(&mut self.obj, r)
++    }
++
++    /// Resets the state of this decoder's data
++    ///
++    /// This will reset the internal state of this decoder. It will continue
++    /// reading from the same stream.
++    pub fn reset_data(&mut self) {
++        reset_decoder_data(self);
++    }
++
++    /// Acquires a reference to the underlying stream
++    pub fn get_ref(&self) -> &R {
++        &self.obj
++    }
++
++    /// Acquires a mutable reference to the underlying stream
++    ///
++    /// Note that mutation of the stream may result in surprising results if
++    /// this decoder continues to be used.
++    pub fn get_mut(&mut self) -> &mut R {
++        &mut self.obj
++    }
++
++    /// Consumes this decoder, returning the underlying reader.
++ pub fn into_inner(self) -> R { ++ self.obj ++ } ++ ++ /// Returns the number of bytes that the decompressor has consumed. ++ /// ++ /// Note that this will likely be smaller than what the decompressor ++ /// actually read from the underlying stream due to buffering. ++ pub fn total_in(&self) -> u64 { ++ self.data.total_in() ++ } ++ ++ /// Returns the number of bytes that the decompressor has produced. ++ pub fn total_out(&self) -> u64 { ++ self.data.total_out() ++ } ++} ++ ++impl Read for DeflateDecoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ zio::read(&mut self.obj, &mut self.data, into) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for DeflateDecoder {} ++ ++impl Write for DeflateDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for DeflateDecoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ self.get_mut().shutdown() ++ } ++} diff --cc vendor/flate2-1.0.2/src/deflate/mod.rs index 000000000,000000000..9ea81dcc6 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/deflate/mod.rs @@@ -1,0 -1,0 +1,193 @@@ ++pub mod bufread; ++pub mod read; ++pub mod write; ++ ++#[cfg(test)] ++mod tests { ++ use std::io::prelude::*; ++ ++ use rand::{thread_rng, Rng}; ++ ++ use super::{read, write}; ++ use Compression; ++ ++ #[test] ++ fn roundtrip() { ++ let mut real = Vec::new(); ++ let mut w = write::DeflateEncoder::new(Vec::new(), Compression::default()); ++ let v = ::random_bytes().take(1024).collect::>(); ++ for _ in 0..200 { ++ let to_write = &v[..thread_rng().gen_range(0, v.len())]; ++ real.extend(to_write.iter().map(|x| *x)); ++ w.write_all(to_write).unwrap(); ++ } ++ let result = w.finish().unwrap(); ++ let mut r = read::DeflateDecoder::new(&result[..]); ++ let mut ret = Vec::new(); ++ r.read_to_end(&mut ret).unwrap(); ++ assert!(ret == real); ++ } ++ ++ #[test] ++ fn drop_writes() { ++ let mut data = Vec::new(); ++ write::DeflateEncoder::new(&mut data, Compression::default()) ++ .write_all(b"foo") ++ .unwrap(); ++ let mut r = read::DeflateDecoder::new(&data[..]); ++ let mut ret = Vec::new(); ++ r.read_to_end(&mut ret).unwrap(); ++ assert!(ret == b"foo"); ++ } ++ ++ #[test] ++ fn total_in() { ++ let mut real = Vec::new(); ++ let mut w = write::DeflateEncoder::new(Vec::new(), Compression::default()); ++ let v = ::random_bytes().take(1024).collect::>(); ++ for _ in 0..200 { ++ let to_write = &v[..thread_rng().gen_range(0, v.len())]; ++ real.extend(to_write.iter().map(|x| *x)); ++ w.write_all(to_write).unwrap(); ++ } ++ let mut result = w.finish().unwrap(); ++ ++ let result_len = result.len(); ++ ++ for _ in 0..200 { ++ result.extend(v.iter().map(|x| *x)); ++ } ++ ++ let mut r = read::DeflateDecoder::new(&result[..]); ++ let mut ret = Vec::new(); ++ r.read_to_end(&mut ret).unwrap(); ++ assert!(ret == real); ++ assert_eq!(r.total_in(), result_len as u64); ++ } ++ ++ #[test] ++ fn roundtrip2() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let mut r = ++ read::DeflateDecoder::new(read::DeflateEncoder::new(&v[..], Compression::default())); ++ let mut ret = Vec::new(); ++ r.read_to_end(&mut ret).unwrap(); ++ assert_eq!(ret, v); ++ } ++ ++ #[test] ++ fn roundtrip3() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let mut w = write::DeflateEncoder::new( ++ write::DeflateDecoder::new(Vec::new()), ++ Compression::default(), ++ ); ++ 
w.write_all(&v).unwrap(); ++ let w = w.finish().unwrap().finish().unwrap(); ++ assert!(w == v); ++ } ++ ++ #[test] ++ fn reset_writer() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let mut w = write::DeflateEncoder::new(Vec::new(), Compression::default()); ++ w.write_all(&v).unwrap(); ++ let a = w.reset(Vec::new()).unwrap(); ++ w.write_all(&v).unwrap(); ++ let b = w.finish().unwrap(); ++ ++ let mut w = write::DeflateEncoder::new(Vec::new(), Compression::default()); ++ w.write_all(&v).unwrap(); ++ let c = w.finish().unwrap(); ++ assert!(a == b && b == c); ++ } ++ ++ #[test] ++ fn reset_reader() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); ++ let mut r = read::DeflateEncoder::new(&v[..], Compression::default()); ++ r.read_to_end(&mut a).unwrap(); ++ r.reset(&v[..]); ++ r.read_to_end(&mut b).unwrap(); ++ ++ let mut r = read::DeflateEncoder::new(&v[..], Compression::default()); ++ r.read_to_end(&mut c).unwrap(); ++ assert!(a == b && b == c); ++ } ++ ++ #[test] ++ fn reset_decoder() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let mut w = write::DeflateEncoder::new(Vec::new(), Compression::default()); ++ w.write_all(&v).unwrap(); ++ let data = w.finish().unwrap(); ++ ++ { ++ let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); ++ let mut r = read::DeflateDecoder::new(&data[..]); ++ r.read_to_end(&mut a).unwrap(); ++ r.reset(&data); ++ r.read_to_end(&mut b).unwrap(); ++ ++ let mut r = read::DeflateDecoder::new(&data[..]); ++ r.read_to_end(&mut c).unwrap(); ++ assert!(a == b && b == c && c == v); ++ } ++ ++ { ++ let mut w = write::DeflateDecoder::new(Vec::new()); ++ w.write_all(&data).unwrap(); ++ let a = w.reset(Vec::new()).unwrap(); ++ w.write_all(&data).unwrap(); ++ let b = w.finish().unwrap(); ++ ++ let mut w = write::DeflateDecoder::new(Vec::new()); ++ w.write_all(&data).unwrap(); ++ let c = w.finish().unwrap(); ++ assert!(a == b && b == c && c == v); ++ } ++ } ++ ++ #[test] ++ fn zero_length_read_with_data() { ++ let m = vec![3u8; 128 * 1024 + 1]; ++ let mut c = read::DeflateEncoder::new(&m[..], Compression::default()); ++ ++ let mut result = Vec::new(); ++ c.read_to_end(&mut result).unwrap(); ++ ++ let mut d = read::DeflateDecoder::new(&result[..]); ++ let mut data = Vec::new(); ++ assert!(d.read(&mut data).unwrap() == 0); ++ } ++ ++ #[test] ++ fn qc_reader() { ++ ::quickcheck::quickcheck(test as fn(_) -> _); ++ ++ fn test(v: Vec) -> bool { ++ let mut r = read::DeflateDecoder::new(read::DeflateEncoder::new( ++ &v[..], ++ Compression::default(), ++ )); ++ let mut v2 = Vec::new(); ++ r.read_to_end(&mut v2).unwrap(); ++ v == v2 ++ } ++ } ++ ++ #[test] ++ fn qc_writer() { ++ ::quickcheck::quickcheck(test as fn(_) -> _); ++ ++ fn test(v: Vec) -> bool { ++ let mut w = write::DeflateEncoder::new( ++ write::DeflateDecoder::new(Vec::new()), ++ Compression::default(), ++ ); ++ w.write_all(&v).unwrap(); ++ v == w.finish().unwrap().finish().unwrap() ++ } ++ } ++} diff --cc vendor/flate2-1.0.2/src/deflate/read.rs index 000000000,000000000..6dc6ee150 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/deflate/read.rs @@@ -1,0 -1,0 +1,266 @@@ ++use std::io::prelude::*; ++use std::io; ++ ++#[cfg(feature = "tokio")] ++use futures::Poll; ++#[cfg(feature = "tokio")] ++use tokio_io::{AsyncRead, AsyncWrite}; ++ ++use bufreader::BufReader; ++use super::bufread; ++ ++/// A DEFLATE encoder, or compressor. 
++/// ++/// This structure implements a [`Read`] interface and will read uncompressed ++/// data from an underlying stream and emit a stream of compressed data. ++/// ++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// use flate2::Compression; ++/// use flate2::read::DeflateEncoder; ++/// ++/// # fn main() { ++/// # println!("{:?}", deflateencoder_read_hello_world().unwrap()); ++/// # } ++/// # ++/// // Return a vector containing the Deflate compressed version of hello world ++/// fn deflateencoder_read_hello_world() -> io::Result> { ++/// let mut ret_vec = [0;100]; ++/// let c = b"hello world"; ++/// let mut deflater = DeflateEncoder::new(&c[..], Compression::fast()); ++/// let count = deflater.read(&mut ret_vec)?; ++/// Ok(ret_vec[0..count].to_vec()) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct DeflateEncoder { ++ inner: bufread::DeflateEncoder>, ++} ++ ++impl DeflateEncoder { ++ /// Creates a new encoder which will read uncompressed data from the given ++ /// stream and emit the compressed stream. ++ pub fn new(r: R, level: ::Compression) -> DeflateEncoder { ++ DeflateEncoder { ++ inner: bufread::DeflateEncoder::new(BufReader::new(r), level), ++ } ++ } ++} ++ ++impl DeflateEncoder { ++ /// Resets the state of this encoder entirely, swapping out the input ++ /// stream for another. ++ /// ++ /// This function will reset the internal state of this encoder and replace ++ /// the input stream with the one provided, returning the previous input ++ /// stream. Future data read from this encoder will be the compressed ++ /// version of `r`'s data. ++ /// ++ /// Note that there may be currently buffered data when this function is ++ /// called, and in that case the buffered data is discarded. ++ pub fn reset(&mut self, r: R) -> R { ++ super::bufread::reset_encoder_data(&mut self.inner); ++ self.inner.get_mut().reset(r) ++ } ++ ++ /// Acquires a reference to the underlying reader ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying stream ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Consumes this encoder, returning the underlying reader. ++ /// ++ /// Note that there may be buffered bytes which are not re-acquired as part ++ /// of this transition. It's recommended to only call this function after ++ /// EOF has been reached. ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++ ++ /// Returns the number of bytes that have been read into this compressor. ++ /// ++ /// Note that not all bytes read from the underlying object may be accounted ++ /// for, there may still be some active buffering. ++ pub fn total_in(&self) -> u64 { ++ self.inner.total_in() ++ } ++ ++ /// Returns the number of bytes that the compressor has produced. ++ /// ++ /// Note that not all bytes may have been read yet, some may still be ++ /// buffered. 
++ pub fn total_out(&self) -> u64 { ++ self.inner.total_out() ++ } ++} ++ ++impl Read for DeflateEncoder { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ self.inner.read(buf) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for DeflateEncoder {} ++ ++impl Write for DeflateEncoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for DeflateEncoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ self.get_mut().shutdown() ++ } ++} ++ ++/// A DEFLATE decoder, or decompressor. ++/// ++/// This structure implements a [`Read`] interface and takes a stream of ++/// compressed data as input, providing the decompressed data when read from. ++/// ++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// # use flate2::Compression; ++/// # use flate2::write::DeflateEncoder; ++/// use flate2::read::DeflateDecoder; ++/// ++/// # fn main() { ++/// # let mut e = DeflateEncoder::new(Vec::new(), Compression::default()); ++/// # e.write_all(b"Hello World").unwrap(); ++/// # let bytes = e.finish().unwrap(); ++/// # println!("{}", decode_reader(bytes).unwrap()); ++/// # } ++/// // Uncompresses a Deflate Encoded vector of bytes and returns a string or error ++/// // Here &[u8] implements Read ++/// fn decode_reader(bytes: Vec) -> io::Result { ++/// let mut deflater = DeflateDecoder::new(&bytes[..]); ++/// let mut s = String::new(); ++/// deflater.read_to_string(&mut s)?; ++/// Ok(s) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct DeflateDecoder { ++ inner: bufread::DeflateDecoder>, ++} ++ ++impl DeflateDecoder { ++ /// Creates a new decoder which will decompress data read from the given ++ /// stream. ++ pub fn new(r: R) -> DeflateDecoder { ++ DeflateDecoder::new_with_buf(r, vec![0; 32 * 1024]) ++ } ++ ++ /// Same as `new`, but the intermediate buffer for data is specified. ++ /// ++ /// Note that the capacity of the intermediate buffer is never increased, ++ /// and it is recommended for it to be large. ++ pub fn new_with_buf(r: R, buf: Vec) -> DeflateDecoder { ++ DeflateDecoder { ++ inner: bufread::DeflateDecoder::new(BufReader::with_buf(buf, r)), ++ } ++ } ++} ++ ++impl DeflateDecoder { ++ /// Resets the state of this decoder entirely, swapping out the input ++ /// stream for another. ++ /// ++ /// This will reset the internal state of this decoder and replace the ++ /// input stream with the one provided, returning the previous input ++ /// stream. Future data read from this decoder will be the decompressed ++ /// version of `r`'s data. ++ /// ++ /// Note that there may be currently buffered data when this function is ++ /// called, and in that case the buffered data is discarded. ++ pub fn reset(&mut self, r: R) -> R { ++ super::bufread::reset_decoder_data(&mut self.inner); ++ self.inner.get_mut().reset(r) ++ } ++ ++ /// Acquires a reference to the underlying stream ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying stream ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Consumes this decoder, returning the underlying reader. 
++ /// ++ /// Note that there may be buffered bytes which are not re-acquired as part ++ /// of this transition. It's recommended to only call this function after ++ /// EOF has been reached. ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++ ++ /// Returns the number of bytes that the decompressor has consumed. ++ /// ++ /// Note that this will likely be smaller than what the decompressor ++ /// actually read from the underlying stream due to buffering. ++ pub fn total_in(&self) -> u64 { ++ self.inner.total_in() ++ } ++ ++ /// Returns the number of bytes that the decompressor has produced. ++ pub fn total_out(&self) -> u64 { ++ self.inner.total_out() ++ } ++} ++ ++impl Read for DeflateDecoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ self.inner.read(into) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for DeflateDecoder {} ++ ++impl Write for DeflateDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for DeflateDecoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ self.get_mut().shutdown() ++ } ++} diff --cc vendor/flate2-1.0.2/src/deflate/write.rs index 000000000,000000000..76dadad37 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/deflate/write.rs @@@ -1,0 -1,0 +1,349 @@@ ++use std::io::prelude::*; ++use std::io; ++ ++#[cfg(feature = "tokio")] ++use futures::Poll; ++#[cfg(feature = "tokio")] ++use tokio_io::{AsyncRead, AsyncWrite}; ++ ++use zio; ++use {Compress, Decompress}; ++ ++/// A DEFLATE encoder, or compressor. ++/// ++/// This structure implements a [`Write`] interface and takes a stream of ++/// uncompressed data, writing the compressed data to the wrapped writer. ++/// ++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use flate2::Compression; ++/// use flate2::write::DeflateEncoder; ++/// ++/// // Vec implements Write to print the compressed bytes of sample string ++/// # fn main() { ++/// ++/// let mut e = DeflateEncoder::new(Vec::new(), Compression::default()); ++/// e.write_all(b"Hello World").unwrap(); ++/// println!("{:?}", e.finish().unwrap()); ++/// # } ++/// ``` ++#[derive(Debug)] ++pub struct DeflateEncoder { ++ inner: zio::Writer, ++} ++ ++impl DeflateEncoder { ++ /// Creates a new encoder which will write compressed data to the stream ++ /// given at the given compression level. ++ /// ++ /// When this encoder is dropped or unwrapped the final pieces of data will ++ /// be flushed. ++ pub fn new(w: W, level: ::Compression) -> DeflateEncoder { ++ DeflateEncoder { ++ inner: zio::Writer::new(w, Compress::new(level, false)), ++ } ++ } ++ ++ /// Acquires a reference to the underlying writer. ++ pub fn get_ref(&self) -> &W { ++ self.inner.get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying writer. ++ /// ++ /// Note that mutating the output/input state of the stream may corrupt this ++ /// object, so care must be taken when using this method. ++ pub fn get_mut(&mut self) -> &mut W { ++ self.inner.get_mut() ++ } ++ ++ /// Resets the state of this encoder entirely, swapping out the output ++ /// stream for another. ++ /// ++ /// This function will finish encoding the current stream into the current ++ /// output stream before swapping out the two output streams. 
If the stream
++    /// cannot be finished an error is returned.
++    ///
++    /// After the current stream has been finished, this will reset the internal
++    /// state of this encoder and replace the output stream with the one
++    /// provided, returning the previous output stream. Future data written to
++    /// this encoder will be compressed into the stream `w` provided.
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to complete this stream, and any I/O
++    /// errors which occur will be returned from this function.
++    pub fn reset(&mut self, w: W) -> io::Result<W> {
++        self.inner.finish()?;
++        self.inner.data.reset();
++        Ok(self.inner.replace(w))
++    }
++
++    /// Attempt to finish this output stream, writing out final chunks of data.
++    ///
++    /// Note that this function can only be used once data has finished being
++    /// written to the output stream. After this function is called then further
++    /// calls to `write` may result in a panic.
++    ///
++    /// # Panics
++    ///
++    /// Attempts to write data to this stream may result in a panic after this
++    /// function is called.
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to complete this stream, and any I/O
++    /// errors which occur will be returned from this function.
++    pub fn try_finish(&mut self) -> io::Result<()> {
++        self.inner.finish()
++    }
++
++    /// Consumes this encoder, flushing the output stream.
++    ///
++    /// This will flush the underlying data stream, close off the compressed
++    /// stream and, if successful, return the contained writer.
++    ///
++    /// Note that this function may not be suitable to call in a situation where
++    /// the underlying stream is an asynchronous I/O stream. To finish a stream
++    /// the `try_finish` (or `shutdown`) method should be used instead. To
++    /// re-acquire ownership of a stream it is safe to call this method after
++    /// `try_finish` or `shutdown` has returned `Ok`.
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to complete this stream, and any I/O
++    /// errors which occur will be returned from this function.
++    pub fn finish(mut self) -> io::Result<W> {
++        self.inner.finish()?;
++        Ok(self.inner.take_inner())
++    }
++
++    /// Consumes this encoder, flushing the output stream.
++    ///
++    /// This will flush the underlying data stream and then return the contained
++    /// writer if the flush succeeded.
++    /// The compressed stream will not be closed, only flushed. This
++    /// means that the obtained byte array can be extended by another deflated
++    /// stream. To close the stream, add the two bytes 0x3 and 0x0.
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to complete this stream, and any I/O
++    /// errors which occur will be returned from this function.
++    pub fn flush_finish(mut self) -> io::Result<W> {
++        self.inner.flush()?;
++        Ok(self.inner.take_inner())
++    }
++
++    /// Returns the number of bytes that have been written to this compressor.
++    ///
++    /// Note that not all bytes written to this object may be accounted for;
++    /// there may still be some active buffering.
++    pub fn total_in(&self) -> u64 {
++        self.inner.data.total_in()
++    }
++
++    /// Returns the number of bytes that the compressor has produced.
++    ///
++    /// Note that not all bytes may have been written yet; some may still be
++    /// buffered.
++    pub fn total_out(&self) -> u64 {
++        self.inner.data.total_out()
++    }
++}
++
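A small sketch of the `reset` path documented above, reusing one `write::DeflateEncoder` for two independent streams with `Vec<u8>` outputs:

```
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::write::DeflateEncoder;

// Reuse one encoder for two independent streams via reset(), which
// finishes the current stream and swaps in a fresh output writer.
fn main() {
    let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
    e.write_all(b"first stream").unwrap();
    let first = e.reset(Vec::new()).unwrap();
    e.write_all(b"second stream").unwrap();
    let second = e.finish().unwrap();
    println!("{} / {} compressed bytes", first.len(), second.len());
}
```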
++impl<W: Write> Write for DeflateEncoder<W> {
++    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
++        self.inner.write(buf)
++    }
++
++    fn flush(&mut self) -> io::Result<()> {
++        self.inner.flush()
++    }
++}
++
++#[cfg(feature = "tokio")]
++impl<W: AsyncWrite> AsyncWrite for DeflateEncoder<W> {
++    fn shutdown(&mut self) -> Poll<(), io::Error> {
++        try_nb!(self.inner.finish());
++        self.inner.get_mut().shutdown()
++    }
++}
++
++impl<W: Read + Write> Read for DeflateEncoder<W> {
++    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
++        self.inner.get_mut().read(buf)
++    }
++}
++
++#[cfg(feature = "tokio")]
++impl<W: AsyncRead + AsyncWrite> AsyncRead for DeflateEncoder<W> {}
++
++/// A DEFLATE decoder, or decompressor.
++///
++/// This structure implements a [`Write`] interface and will emit a stream of
++/// decompressed data when fed a stream of compressed data.
++///
++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
++///
++/// # Examples
++///
++/// ```
++/// use std::io::prelude::*;
++/// use std::io;
++/// # use flate2::Compression;
++/// # use flate2::write::DeflateEncoder;
++/// use flate2::write::DeflateDecoder;
++///
++/// # fn main() {
++/// #     let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
++/// #     e.write_all(b"Hello World").unwrap();
++/// #     let bytes = e.finish().unwrap();
++/// #     println!("{}", decode_writer(bytes).unwrap());
++/// # }
++/// // Uncompresses a DEFLATE-encoded vector of bytes and returns a string or error
++/// // Here Vec<u8> implements Write
++/// fn decode_writer(bytes: Vec<u8>) -> io::Result<String> {
++///     let mut writer = Vec::new();
++///     let mut deflater = DeflateDecoder::new(writer);
++///     deflater.write_all(&bytes[..])?;
++///     writer = deflater.finish()?;
++///     let return_string = String::from_utf8(writer).expect("String parsing error");
++///     Ok(return_string)
++/// }
++/// ```
++#[derive(Debug)]
++pub struct DeflateDecoder<W: Write> {
++    inner: zio::Writer<W, Decompress>,
++}
++
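Because this decoder consumes its input through `Write`, compressed bytes can be fed in chunks of any size; a minimal sketch round-tripping through the encoder above:

```
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::write::{DeflateDecoder, DeflateEncoder};

// Feed compressed input to the decoder in small chunks; the Write-based
// interface imposes no trait bound on the input side.
fn main() {
    let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
    e.write_all(b"Hello World").unwrap();
    let compressed = e.finish().unwrap();

    let mut d = DeflateDecoder::new(Vec::new());
    for chunk in compressed.chunks(3) {
        d.write_all(chunk).unwrap();
    }
    let out = d.finish().unwrap();
    assert_eq!(&out[..], &b"Hello World"[..]);
}
```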
++impl<W: Write> DeflateDecoder<W> {
++    /// Creates a new decoder which will write uncompressed data to the stream.
++    ///
++    /// When this decoder is dropped or unwrapped the final pieces of data will
++    /// be flushed.
++    pub fn new(w: W) -> DeflateDecoder<W> {
++        DeflateDecoder {
++            inner: zio::Writer::new(w, Decompress::new(false)),
++        }
++    }
++
++    /// Acquires a reference to the underlying writer.
++    pub fn get_ref(&self) -> &W {
++        self.inner.get_ref()
++    }
++
++    /// Acquires a mutable reference to the underlying writer.
++    ///
++    /// Note that mutating the output/input state of the stream may corrupt this
++    /// object, so care must be taken when using this method.
++    pub fn get_mut(&mut self) -> &mut W {
++        self.inner.get_mut()
++    }
++
++    /// Resets the state of this decoder entirely, swapping out the output
++    /// stream for another.
++    ///
++    /// This function will finish decoding the current stream into the current
++    /// output stream before swapping out the two output streams.
++    ///
++    /// This will then reset the internal state of this decoder and replace the
++    /// output stream with the one provided, returning the previous output
++    /// stream. Future data written to this decoder will be decompressed into
++    /// the output stream `w`.
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to finish the stream, and if that I/O
++    /// returns an error then that will be returned from this function.
++    pub fn reset(&mut self, w: W) -> io::Result<W> {
++        self.inner.finish()?;
++        self.inner.data = Decompress::new(false);
++        Ok(self.inner.replace(w))
++    }
++
++    /// Attempt to finish this output stream, writing out final chunks of data.
++    ///
++    /// Note that this function can only be used once data has finished being
++    /// written to the output stream. After this function is called then further
++    /// calls to `write` may result in a panic.
++    ///
++    /// # Panics
++    ///
++    /// Attempts to write data to this stream may result in a panic after this
++    /// function is called.
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to finish the stream, returning any
++    /// errors which happen.
++    pub fn try_finish(&mut self) -> io::Result<()> {
++        self.inner.finish()
++    }
++
++    /// Consumes this decoder, flushing the output stream.
++    ///
++    /// This will flush the underlying data stream and then return the contained
++    /// writer if the flush succeeded.
++    ///
++    /// Note that this function may not be suitable to call in a situation where
++    /// the underlying stream is an asynchronous I/O stream. To finish a stream
++    /// the `try_finish` (or `shutdown`) method should be used instead. To
++    /// re-acquire ownership of a stream it is safe to call this method after
++    /// `try_finish` or `shutdown` has returned `Ok`.
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to complete this stream, and any I/O
++    /// errors which occur will be returned from this function.
++    pub fn finish(mut self) -> io::Result<W> {
++        self.inner.finish()?;
++        Ok(self.inner.take_inner())
++    }
++
++    /// Returns the number of bytes that the decompressor has consumed for
++    /// decompression.
++    ///
++    /// Note that this will likely be smaller than the number of bytes
++    /// successfully written to this stream due to internal buffering.
++    pub fn total_in(&self) -> u64 {
++        self.inner.data.total_in()
++    }
++
++    /// Returns the number of bytes that the decompressor has written to its
++    /// output stream.
++ pub fn total_out(&self) -> u64 { ++ self.inner.data.total_out() ++ } ++} ++ ++impl Write for DeflateDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.inner.write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.inner.flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for DeflateDecoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ try_nb!(self.inner.finish()); ++ self.inner.get_mut().shutdown() ++ } ++} ++ ++impl Read for DeflateDecoder { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ self.inner.get_mut().read(buf) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for DeflateDecoder {} diff --cc vendor/flate2-1.0.2/src/ffi.rs index 000000000,000000000..f154ba859 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/ffi.rs @@@ -1,0 -1,0 +1,278 @@@ ++pub use self::imp::*; ++ ++#[cfg(feature = "zlib")] ++#[allow(bad_style)] ++mod imp { ++ extern crate libz_sys as z; ++ use std::mem; ++ use std::ops::{Deref, DerefMut}; ++ use libc::{c_char, c_int, c_uint, c_ulong, size_t}; ++ ++ pub use self::z::*; ++ pub use self::z::deflateEnd as mz_deflateEnd; ++ pub use self::z::inflateEnd as mz_inflateEnd; ++ pub use self::z::deflateReset as mz_deflateReset; ++ pub use self::z::deflate as mz_deflate; ++ pub use self::z::inflate as mz_inflate; ++ pub use self::z::z_stream as mz_stream; ++ ++ pub use self::z::Z_BLOCK as MZ_BLOCK; ++ pub use self::z::Z_BUF_ERROR as MZ_BUF_ERROR; ++ pub use self::z::Z_DATA_ERROR as MZ_DATA_ERROR; ++ pub use self::z::Z_DEFAULT_STRATEGY as MZ_DEFAULT_STRATEGY; ++ pub use self::z::Z_DEFLATED as MZ_DEFLATED; ++ pub use self::z::Z_FINISH as MZ_FINISH; ++ pub use self::z::Z_FULL_FLUSH as MZ_FULL_FLUSH; ++ pub use self::z::Z_NO_FLUSH as MZ_NO_FLUSH; ++ pub use self::z::Z_OK as MZ_OK; ++ pub use self::z::Z_PARTIAL_FLUSH as MZ_PARTIAL_FLUSH; ++ pub use self::z::Z_STREAM_END as MZ_STREAM_END; ++ pub use self::z::Z_SYNC_FLUSH as MZ_SYNC_FLUSH; ++ pub use self::z::Z_STREAM_ERROR as MZ_STREAM_ERROR; ++ pub use self::z::Z_NEED_DICT as MZ_NEED_DICT; ++ ++ pub const MZ_DEFAULT_WINDOW_BITS: c_int = 15; ++ ++ pub unsafe extern "C" fn mz_crc32(crc: c_ulong, ptr: *const u8, len: size_t) -> c_ulong { ++ z::crc32(crc, ptr, len as c_uint) ++ } ++ ++ pub unsafe extern "C" fn mz_crc32_combine( ++ crc1: c_ulong, ++ crc2: c_ulong, ++ len2: z_off_t, ++ ) -> c_ulong { ++ z::crc32_combine(crc1, crc2, len2) ++ } ++ ++ const ZLIB_VERSION: &'static str = "1.2.8\0"; ++ ++ pub unsafe extern "C" fn mz_deflateInit2( ++ stream: *mut mz_stream, ++ level: c_int, ++ method: c_int, ++ window_bits: c_int, ++ mem_level: c_int, ++ strategy: c_int, ++ ) -> c_int { ++ z::deflateInit2_( ++ stream, ++ level, ++ method, ++ window_bits, ++ mem_level, ++ strategy, ++ ZLIB_VERSION.as_ptr() as *const c_char, ++ mem::size_of::() as c_int, ++ ) ++ } ++ pub unsafe extern "C" fn mz_inflateInit2(stream: *mut mz_stream, window_bits: c_int) -> c_int { ++ z::inflateInit2_( ++ stream, ++ window_bits, ++ ZLIB_VERSION.as_ptr() as *const c_char, ++ mem::size_of::() as c_int, ++ ) ++ } ++ ++ pub struct StreamWrapper { ++ inner: Box, ++ } ++ ++ impl ::std::fmt::Debug for StreamWrapper { ++ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { ++ write!(f, "StreamWrapper") ++ } ++ } ++ ++ impl Default for StreamWrapper { ++ fn default() -> StreamWrapper { ++ StreamWrapper { ++ inner: Box::new(unsafe { mem::zeroed() }), ++ } ++ } ++ } ++ ++ impl Deref for StreamWrapper { ++ type Target = mz_stream; ++ ++ fn deref(&self) -> 
&Self::Target { ++ &*self.inner ++ } ++ } ++ ++ impl DerefMut for StreamWrapper { ++ fn deref_mut(&mut self) -> &mut Self::Target { ++ &mut *self.inner ++ } ++ } ++} ++ ++#[cfg(all(not(feature = "zlib"), feature = "rust_backend"))] ++mod imp { ++ extern crate miniz_oxide_c_api; ++ use std::ops::{Deref, DerefMut}; ++ ++ pub use ffi::crc_imp::*; ++ pub use self::miniz_oxide_c_api::*; ++ pub use self::miniz_oxide_c_api::lib_oxide::*; ++ ++ #[derive(Debug, Default)] ++ pub struct StreamWrapper { ++ inner: mz_stream, ++ } ++ ++ impl Deref for StreamWrapper { ++ type Target = mz_stream; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.inner ++ } ++ } ++ ++ impl DerefMut for StreamWrapper { ++ fn deref_mut(&mut self) -> &mut Self::Target { ++ &mut self.inner ++ } ++ } ++} ++ ++#[cfg(all(not(feature = "zlib"), not(feature = "rust_backend")))] ++mod imp { ++ extern crate miniz_sys; ++ use std::mem; ++ use std::ops::{Deref, DerefMut}; ++ ++ pub use self::miniz_sys::*; ++ pub use ffi::crc_imp::*; ++ ++ pub struct StreamWrapper { ++ inner: mz_stream, ++ } ++ ++ impl ::std::fmt::Debug for StreamWrapper { ++ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { ++ write!(f, "StreamWrapper") ++ } ++ } ++ ++ impl Default for StreamWrapper { ++ fn default() -> StreamWrapper { ++ StreamWrapper { ++ inner: unsafe { mem::zeroed() }, ++ } ++ } ++ } ++ ++ impl Deref for StreamWrapper { ++ type Target = mz_stream; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.inner ++ } ++ } ++ ++ impl DerefMut for StreamWrapper { ++ fn deref_mut(&mut self) -> &mut Self::Target { ++ &mut self.inner ++ } ++ } ++} ++ ++#[cfg(not(feature = "zlib"))] ++mod crc_imp { ++ use libc::{c_ulong, off_t}; ++ pub unsafe extern "C" fn mz_crc32_combine( ++ crc1: c_ulong, ++ crc2: c_ulong, ++ len2: off_t, ++ ) -> c_ulong { ++ crc32_combine_(crc1, crc2, len2) ++ } ++ ++ // gf2_matrix_times, gf2_matrix_square and crc32_combine_ are ported from ++ // zlib. 
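The combine routine ported here exists to satisfy the identity crc32(A ++ B) == combine(crc32(A), crc32(B), len(B)). A sketch that checks the identity through the public `Crc::combine` wrapper, assuming the crate-root re-export of `Crc`:

```
extern crate flate2;

use flate2::Crc;

// crc32 over the whole input must equal the combination of the CRCs
// of its two halves.
fn main() {
    let (a, b) = (&b"hello "[..], &b"world"[..]);

    let mut whole = Crc::new();
    whole.update(a);
    whole.update(b);

    let mut left = Crc::new();
    left.update(a);
    let mut right = Crc::new();
    right.update(b);
    left.combine(&right);

    assert_eq!(left.sum(), whole.sum());
}
```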
++ ++ fn gf2_matrix_times(mat: &[c_ulong; 32], mut vec: c_ulong) -> c_ulong { ++ let mut sum = 0; ++ let mut mat_pos = 0; ++ while vec != 0 { ++ if vec & 1 == 1 { ++ sum ^= mat[mat_pos]; ++ } ++ vec >>= 1; ++ mat_pos += 1; ++ } ++ sum ++ } ++ ++ fn gf2_matrix_square(square: &mut [c_ulong; 32], mat: &[c_ulong; 32]) { ++ for n in 0..32 { ++ square[n] = gf2_matrix_times(mat, mat[n]); ++ } ++ } ++ ++ fn crc32_combine_(mut crc1: c_ulong, crc2: c_ulong, mut len2: off_t) -> c_ulong { ++ let mut row; ++ ++ let mut even = [0; 32]; /* even-power-of-two zeros operator */ ++ let mut odd = [0; 32]; /* odd-power-of-two zeros operator */ ++ ++ /* degenerate case (also disallow negative lengths) */ ++ if len2 <= 0 { ++ return crc1; ++ } ++ ++ /* put operator for one zero bit in odd */ ++ odd[0] = 0xedb88320; /* CRC-32 polynomial */ ++ row = 1; ++ for n in 1..32 { ++ odd[n] = row; ++ row <<= 1; ++ } ++ ++ /* put operator for two zero bits in even */ ++ gf2_matrix_square(&mut even, &odd); ++ ++ /* put operator for four zero bits in odd */ ++ gf2_matrix_square(&mut odd, &even); ++ ++ /* apply len2 zeros to crc1 (first square will put the operator for one ++ zero byte, eight zero bits, in even) */ ++ loop { ++ /* apply zeros operator for this bit of len2 */ ++ gf2_matrix_square(&mut even, &odd); ++ if len2 & 1 == 1 { ++ crc1 = gf2_matrix_times(&even, crc1); ++ } ++ len2 >>= 1; ++ ++ /* if no more bits set, then done */ ++ if len2 == 0 { ++ break; ++ } ++ ++ /* another iteration of the loop with odd and even swapped */ ++ gf2_matrix_square(&mut odd, &even); ++ if len2 & 1 == 1 { ++ crc1 = gf2_matrix_times(&odd, crc1); ++ } ++ len2 >>= 1; ++ ++ /* if no more bits set, then done */ ++ if len2 == 0 { ++ break; ++ } ++ } ++ ++ /* return combined crc */ ++ crc1 ^= crc2; ++ crc1 ++ } ++} ++ ++#[test] ++fn crc32_combine() { ++ let crc32 = unsafe { imp::mz_crc32_combine(1, 2, 3) }; ++ assert_eq!(crc32, 29518389); ++} diff --cc vendor/flate2-1.0.2/src/gz/bufread.rs index 000000000,000000000..3982e80ad new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/gz/bufread.rs @@@ -1,0 -1,0 +1,556 @@@ ++use std::cmp; ++use std::io; ++use std::io::prelude::*; ++use std::mem; ++ ++use super::{GzBuilder, GzHeader}; ++use super::{FCOMMENT, FEXTRA, FHCRC, FNAME}; ++use crc::CrcReader; ++use deflate; ++use Compression; ++ ++fn copy(into: &mut [u8], from: &[u8], pos: &mut usize) -> usize { ++ let min = cmp::min(into.len(), from.len() - *pos); ++ for (slot, val) in into.iter_mut().zip(from[*pos..*pos + min].iter()) { ++ *slot = *val; ++ } ++ *pos += min; ++ return min; ++} ++ ++pub(crate) fn corrupt() -> io::Error { ++ io::Error::new( ++ io::ErrorKind::InvalidInput, ++ "corrupt gzip stream does not have a matching checksum", ++ ) ++} ++ ++fn bad_header() -> io::Error { ++ io::Error::new(io::ErrorKind::InvalidInput, "invalid gzip header") ++} ++ ++fn read_le_u16(r: &mut R) -> io::Result { ++ let mut b = [0; 2]; ++ r.read_exact(&mut b)?; ++ Ok((b[0] as u16) | ((b[1] as u16) << 8)) ++} ++ ++pub(crate) fn read_gz_header(r: &mut R) -> io::Result { ++ let mut crc_reader = CrcReader::new(r); ++ let mut header = [0; 10]; ++ crc_reader.read_exact(&mut header)?; ++ ++ let id1 = header[0]; ++ let id2 = header[1]; ++ if id1 != 0x1f || id2 != 0x8b { ++ return Err(bad_header()); ++ } ++ let cm = header[2]; ++ if cm != 8 { ++ return Err(bad_header()); ++ } ++ ++ let flg = header[3]; ++ let mtime = ((header[4] as u32) << 0) ++ | ((header[5] as u32) << 8) ++ | ((header[6] as u32) << 16) ++ | ((header[7] as u32) << 24); ++ let _xfl = 
header[8]; ++ let os = header[9]; ++ ++ let extra = if flg & FEXTRA != 0 { ++ let xlen = read_le_u16(&mut crc_reader)?; ++ let mut extra = vec![0; xlen as usize]; ++ crc_reader.read_exact(&mut extra)?; ++ Some(extra) ++ } else { ++ None ++ }; ++ let filename = if flg & FNAME != 0 { ++ // wow this is slow ++ let mut b = Vec::new(); ++ for byte in crc_reader.by_ref().bytes() { ++ let byte = byte?; ++ if byte == 0 { ++ break; ++ } ++ b.push(byte); ++ } ++ Some(b) ++ } else { ++ None ++ }; ++ let comment = if flg & FCOMMENT != 0 { ++ // wow this is slow ++ let mut b = Vec::new(); ++ for byte in crc_reader.by_ref().bytes() { ++ let byte = byte?; ++ if byte == 0 { ++ break; ++ } ++ b.push(byte); ++ } ++ Some(b) ++ } else { ++ None ++ }; ++ ++ if flg & FHCRC != 0 { ++ let calced_crc = crc_reader.crc().sum() as u16; ++ let stored_crc = read_le_u16(&mut crc_reader)?; ++ if calced_crc != stored_crc { ++ return Err(corrupt()); ++ } ++ } ++ ++ Ok(GzHeader { ++ extra: extra, ++ filename: filename, ++ comment: comment, ++ operating_system: os, ++ mtime: mtime, ++ }) ++} ++ ++/// A gzip streaming encoder ++/// ++/// This structure exposes a [`BufRead`] interface that will read uncompressed data ++/// from the underlying reader and expose the compressed version as a [`BufRead`] ++/// interface. ++/// ++/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// use flate2::Compression; ++/// use flate2::bufread::GzEncoder; ++/// use std::fs::File; ++/// use std::io::BufReader; ++/// ++/// // Opens sample file, compresses the contents and returns a Vector or error ++/// // File wrapped in a BufReader implements BufRead ++/// ++/// fn open_hello_world() -> io::Result> { ++/// let f = File::open("examples/hello_world.txt")?; ++/// let b = BufReader::new(f); ++/// let mut gz = GzEncoder::new(b, Compression::fast()); ++/// let mut buffer = Vec::new(); ++/// gz.read_to_end(&mut buffer)?; ++/// Ok(buffer) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct GzEncoder { ++ inner: deflate::bufread::DeflateEncoder>, ++ header: Vec, ++ pos: usize, ++ eof: bool, ++} ++ ++pub fn gz_encoder(header: Vec, r: R, lvl: Compression) -> GzEncoder { ++ let crc = CrcReader::new(r); ++ GzEncoder { ++ inner: deflate::bufread::DeflateEncoder::new(crc, lvl), ++ header: header, ++ pos: 0, ++ eof: false, ++ } ++} ++ ++impl GzEncoder { ++ /// Creates a new encoder which will use the given compression level. ++ /// ++ /// The encoder is not configured specially for the emitted header. For ++ /// header configuration, see the `GzBuilder` type. ++ /// ++ /// The data read from the stream `r` will be compressed and available ++ /// through the returned reader. ++ pub fn new(r: R, level: Compression) -> GzEncoder { ++ GzBuilder::new().buf_read(r, level) ++ } ++ ++ fn read_footer(&mut self, into: &mut [u8]) -> io::Result { ++ if self.pos == 8 { ++ return Ok(0); ++ } ++ let crc = self.inner.get_ref().crc(); ++ let ref arr = [ ++ (crc.sum() >> 0) as u8, ++ (crc.sum() >> 8) as u8, ++ (crc.sum() >> 16) as u8, ++ (crc.sum() >> 24) as u8, ++ (crc.amount() >> 0) as u8, ++ (crc.amount() >> 8) as u8, ++ (crc.amount() >> 16) as u8, ++ (crc.amount() >> 24) as u8, ++ ]; ++ Ok(copy(into, arr, &mut self.pos)) ++ } ++} ++ ++impl GzEncoder { ++ /// Acquires a reference to the underlying reader. ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying reader. 
++ /// ++ /// Note that mutation of the reader may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Returns the underlying stream, consuming this encoder ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++} ++ ++impl Read for GzEncoder { ++ fn read(&mut self, mut into: &mut [u8]) -> io::Result { ++ let mut amt = 0; ++ if self.eof { ++ return self.read_footer(into); ++ } else if self.pos < self.header.len() { ++ amt += copy(into, &self.header, &mut self.pos); ++ if amt == into.len() { ++ return Ok(amt); ++ } ++ let tmp = into; ++ into = &mut tmp[amt..]; ++ } ++ match self.inner.read(into)? { ++ 0 => { ++ self.eof = true; ++ self.pos = 0; ++ self.read_footer(into) ++ } ++ n => Ok(amt + n), ++ } ++ } ++} ++ ++impl Write for GzEncoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++/// A gzip streaming decoder ++/// ++/// This structure exposes a [`ReadBuf`] interface that will consume compressed ++/// data from the underlying reader and emit uncompressed data. ++/// ++/// [`ReadBuf`]: https://doc.rust-lang.org/std/io/trait.BufRead.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// # use flate2::Compression; ++/// # use flate2::write::GzEncoder; ++/// use flate2::bufread::GzDecoder; ++/// ++/// # fn main() { ++/// # let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++/// # e.write_all(b"Hello World").unwrap(); ++/// # let bytes = e.finish().unwrap(); ++/// # println!("{}", decode_reader(bytes).unwrap()); ++/// # } ++/// # ++/// // Uncompresses a Gz Encoded vector of bytes and returns a string or error ++/// // Here &[u8] implements BufRead ++/// ++/// fn decode_reader(bytes: Vec) -> io::Result { ++/// let mut gz = GzDecoder::new(&bytes[..]); ++/// let mut s = String::new(); ++/// gz.read_to_string(&mut s)?; ++/// Ok(s) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct GzDecoder { ++ inner: CrcReader>, ++ header: Option>, ++ finished: bool, ++} ++ ++impl GzDecoder { ++ /// Creates a new decoder from the given reader, immediately parsing the ++ /// gzip header. ++ pub fn new(mut r: R) -> GzDecoder { ++ let header = read_gz_header(&mut r); ++ ++ let flate = deflate::bufread::DeflateDecoder::new(r); ++ GzDecoder { ++ inner: CrcReader::new(flate), ++ header: Some(header), ++ finished: false, ++ } ++ } ++ ++ fn finish(&mut self) -> io::Result<()> { ++ if self.finished { ++ return Ok(()); ++ } ++ let ref mut buf = [0u8; 8]; ++ { ++ let mut len = 0; ++ ++ while len < buf.len() { ++ match self.inner.get_mut().get_mut().read(&mut buf[len..])? 
{ ++ 0 => return Err(corrupt()), ++ n => len += n, ++ } ++ } ++ } ++ ++ let crc = ((buf[0] as u32) << 0) ++ | ((buf[1] as u32) << 8) ++ | ((buf[2] as u32) << 16) ++ | ((buf[3] as u32) << 24); ++ let amt = ((buf[4] as u32) << 0) ++ | ((buf[5] as u32) << 8) ++ | ((buf[6] as u32) << 16) ++ | ((buf[7] as u32) << 24); ++ if crc != self.inner.crc().sum() { ++ return Err(corrupt()); ++ } ++ if amt != self.inner.crc().amount() { ++ return Err(corrupt()); ++ } ++ self.finished = true; ++ Ok(()) ++ } ++} ++ ++impl GzDecoder { ++ /// Returns the header associated with this stream, if it was valid ++ pub fn header(&self) -> Option<&GzHeader> { ++ self.header.as_ref().and_then(|h| h.as_ref().ok()) ++ } ++ ++ /// Acquires a reference to the underlying reader. ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying stream. ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Consumes this decoder, returning the underlying reader. ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++} ++ ++impl Read for GzDecoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ match self.header { ++ None => return Ok(0), // error already returned, ++ Some(Ok(_)) => {} ++ Some(Err(_)) => match self.header.take().unwrap() { ++ Ok(_) => panic!(), ++ Err(e) => return Err(e), ++ }, ++ } ++ if into.is_empty() { ++ return Ok(0); ++ } ++ match self.inner.read(into)? { ++ 0 => { ++ self.finish()?; ++ Ok(0) ++ } ++ n => Ok(n), ++ } ++ } ++} ++ ++impl Write for GzDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++/// A gzip streaming decoder that decodes all members of a multistream ++/// ++/// A gzip member consists of a header, compressed data and a trailer. The [gzip ++/// specification](https://tools.ietf.org/html/rfc1952), however, allows multiple ++/// gzip members to be joined in a single stream. `MultiGzDecoder` will ++/// decode all consecutive members while `GzDecoder` will only decompress ++/// the first gzip member. The multistream format is commonly used in ++/// bioinformatics, for example when using the BGZF compressed data. ++/// ++/// This structure exposes a [`BufRead`] interface that will consume all gzip members ++/// from the underlying reader and emit uncompressed data. 
++/// ++/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// # use flate2::Compression; ++/// # use flate2::write::GzEncoder; ++/// use flate2::bufread::MultiGzDecoder; ++/// ++/// # fn main() { ++/// # let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++/// # e.write_all(b"Hello World").unwrap(); ++/// # let bytes = e.finish().unwrap(); ++/// # println!("{}", decode_reader(bytes).unwrap()); ++/// # } ++/// # ++/// // Uncompresses a Gz Encoded vector of bytes and returns a string or error ++/// // Here &[u8] implements BufRead ++/// ++/// fn decode_reader(bytes: Vec) -> io::Result { ++/// let mut gz = MultiGzDecoder::new(&bytes[..]); ++/// let mut s = String::new(); ++/// gz.read_to_string(&mut s)?; ++/// Ok(s) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct MultiGzDecoder { ++ inner: CrcReader>, ++ header: io::Result, ++ finished: bool, ++} ++ ++impl MultiGzDecoder { ++ /// Creates a new decoder from the given reader, immediately parsing the ++ /// (first) gzip header. If the gzip stream contains multiple members all will ++ /// be decoded. ++ pub fn new(mut r: R) -> MultiGzDecoder { ++ let header = read_gz_header(&mut r); ++ ++ let flate = deflate::bufread::DeflateDecoder::new(r); ++ MultiGzDecoder { ++ inner: CrcReader::new(flate), ++ header: header, ++ finished: false, ++ } ++ } ++ ++ fn finish_member(&mut self) -> io::Result { ++ if self.finished { ++ return Ok(0); ++ } ++ let ref mut buf = [0u8; 8]; ++ { ++ let mut len = 0; ++ ++ while len < buf.len() { ++ match self.inner.get_mut().get_mut().read(&mut buf[len..])? { ++ 0 => return Err(corrupt()), ++ n => len += n, ++ } ++ } ++ } ++ ++ let crc = ((buf[0] as u32) << 0) ++ | ((buf[1] as u32) << 8) ++ | ((buf[2] as u32) << 16) ++ | ((buf[3] as u32) << 24); ++ let amt = ((buf[4] as u32) << 0) ++ | ((buf[5] as u32) << 8) ++ | ((buf[6] as u32) << 16) ++ | ((buf[7] as u32) << 24); ++ if crc != self.inner.crc().sum() as u32 { ++ return Err(corrupt()); ++ } ++ if amt != self.inner.crc().amount() { ++ return Err(corrupt()); ++ } ++ let remaining = match self.inner.get_mut().get_mut().fill_buf() { ++ Ok(b) => if b.is_empty() { ++ self.finished = true; ++ return Ok(0); ++ } else { ++ b.len() ++ }, ++ Err(e) => return Err(e), ++ }; ++ ++ let next_header = read_gz_header(self.inner.get_mut().get_mut()); ++ drop(mem::replace(&mut self.header, next_header)); ++ self.inner.reset(); ++ self.inner.get_mut().reset_data(); ++ ++ Ok(remaining) ++ } ++} ++ ++impl MultiGzDecoder { ++ /// Returns the current header associated with this stream, if it's valid ++ pub fn header(&self) -> Option<&GzHeader> { ++ self.header.as_ref().ok() ++ } ++ ++ /// Acquires a reference to the underlying reader. ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying stream. ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Consumes this decoder, returning the underlying reader. 
++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++} ++ ++impl Read for MultiGzDecoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ if let Err(ref mut e) = self.header { ++ let another_error = io::ErrorKind::Other.into(); ++ return Err(mem::replace(e, another_error)); ++ } ++ match self.inner.read(into)? { ++ 0 => match self.finish_member() { ++ Ok(0) => Ok(0), ++ Ok(_) => self.read(into), ++ Err(e) => Err(e), ++ }, ++ n => Ok(n), ++ } ++ } ++} ++ ++impl Write for MultiGzDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} diff --cc vendor/flate2-1.0.2/src/gz/mod.rs index 000000000,000000000..5fcbe50d1 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/gz/mod.rs @@@ -1,0 -1,0 +1,359 @@@ ++use std::ffi::CString; ++use std::io::prelude::*; ++use std::time; ++ ++use Compression; ++use bufreader::BufReader; ++ ++pub static FHCRC: u8 = 1 << 1; ++pub static FEXTRA: u8 = 1 << 2; ++pub static FNAME: u8 = 1 << 3; ++pub static FCOMMENT: u8 = 1 << 4; ++ ++pub mod bufread; ++pub mod read; ++pub mod write; ++ ++/// A structure representing the header of a gzip stream. ++/// ++/// The header can contain metadata about the file that was compressed, if ++/// present. ++#[derive(PartialEq, Clone, Debug)] ++pub struct GzHeader { ++ extra: Option>, ++ filename: Option>, ++ comment: Option>, ++ operating_system: u8, ++ mtime: u32, ++} ++ ++impl GzHeader { ++ /// Returns the `filename` field of this gzip stream's header, if present. ++ pub fn filename(&self) -> Option<&[u8]> { ++ self.filename.as_ref().map(|s| &s[..]) ++ } ++ ++ /// Returns the `extra` field of this gzip stream's header, if present. ++ pub fn extra(&self) -> Option<&[u8]> { ++ self.extra.as_ref().map(|s| &s[..]) ++ } ++ ++ /// Returns the `comment` field of this gzip stream's header, if present. ++ pub fn comment(&self) -> Option<&[u8]> { ++ self.comment.as_ref().map(|s| &s[..]) ++ } ++ ++ /// Returns the `operating_system` field of this gzip stream's header. ++ /// ++ /// There are predefined values for various operating systems. ++ /// 255 means that the value is unknown. ++ pub fn operating_system(&self) -> u8 { ++ self.operating_system ++ } ++ ++ /// This gives the most recent modification time of the original file being compressed. ++ /// ++ /// The time is in Unix format, i.e., seconds since 00:00:00 GMT, Jan. 1, 1970. ++ /// (Note that this may cause problems for MS-DOS and other systems that use local ++ /// rather than Universal time.) If the compressed data did not come from a file, ++ /// `mtime` is set to the time at which compression started. ++ /// `mtime` = 0 means no time stamp is available. ++ /// ++ /// The usage of `mtime` is discouraged because of Year 2038 problem. ++ pub fn mtime(&self) -> u32 { ++ self.mtime ++ } ++ ++ /// Returns the most recent modification time represented by a date-time type. ++ /// Returns `None` if the value of the underlying counter is 0, ++ /// indicating no time stamp is available. ++ /// ++ /// ++ /// The time is measured as seconds since 00:00:00 GMT, Jan. 1 1970. ++ /// See [`mtime`](#method.mtime) for more detail. ++ pub fn mtime_as_datetime(&self) -> Option { ++ if self.mtime == 0 { ++ None ++ } else { ++ let duration = time::Duration::new(u64::from(self.mtime), 0); ++ let datetime = time::UNIX_EPOCH + duration; ++ Some(datetime) ++ } ++ } ++} ++ ++/// A builder structure to create a new gzip Encoder. 
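Since the `GzHeader` accessors above return raw bytes (gzip prescribes no encoding for `filename` and `comment`) and a Unix timestamp, a typical consumer converts them explicitly. A small usage sketch against the API defined in this file, not part of the vendored source:

```rust
use std::io::prelude::*;
use flate2::read::GzDecoder;

/// Prints the optional metadata carried by a gzip header.
fn print_gz_metadata(gz_bytes: &[u8]) -> std::io::Result<()> {
    let mut d = GzDecoder::new(gz_bytes);
    let mut out = Vec::new();
    d.read_to_end(&mut out)?; // ensure the header has been parsed
    if let Some(h) = d.header() {
        println!("filename: {:?}", h.filename().map(String::from_utf8_lossy));
        println!("comment:  {:?}", h.comment().map(String::from_utf8_lossy));
        println!("os byte:  {}", h.operating_system()); // 255 = unknown
        println!("mtime:    {:?}", h.mtime_as_datetime()); // None when mtime == 0
    }
    Ok(())
}
```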
++/// ++/// This structure controls header configuration options such as the filename. ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// # use std::io; ++/// use std::fs::File; ++/// use flate2::GzBuilder; ++/// use flate2::Compression; ++/// ++/// // GzBuilder opens a file and writes a sample string using GzBuilder pattern ++/// ++/// # fn sample_builder() -> Result<(), io::Error> { ++/// let f = File::create("examples/hello_world.gz")?; ++/// let mut gz = GzBuilder::new() ++/// .filename("hello_world.txt") ++/// .comment("test file, please delete") ++/// .write(f, Compression::default()); ++/// gz.write_all(b"hello world")?; ++/// gz.finish()?; ++/// # Ok(()) ++/// # } ++/// ``` ++#[derive(Debug)] ++pub struct GzBuilder { ++ extra: Option>, ++ filename: Option, ++ comment: Option, ++ operating_system: Option, ++ mtime: u32, ++} ++ ++impl GzBuilder { ++ /// Create a new blank builder with no header by default. ++ pub fn new() -> GzBuilder { ++ GzBuilder { ++ extra: None, ++ filename: None, ++ comment: None, ++ operating_system: None, ++ mtime: 0, ++ } ++ } ++ ++ /// Configure the `mtime` field in the gzip header. ++ pub fn mtime(mut self, mtime: u32) -> GzBuilder { ++ self.mtime = mtime; ++ self ++ } ++ ++ /// Configure the `operating_system` field in the gzip header. ++ pub fn operating_system(mut self, os: u8) -> GzBuilder { ++ self.operating_system = Some(os); ++ self ++ } ++ ++ /// Configure the `extra` field in the gzip header. ++ pub fn extra>>(mut self, extra: T) -> GzBuilder { ++ self.extra = Some(extra.into()); ++ self ++ } ++ ++ /// Configure the `filename` field in the gzip header. ++ /// ++ /// # Panics ++ /// ++ /// Panics if the `filename` slice contains a zero. ++ pub fn filename>>(mut self, filename: T) -> GzBuilder { ++ self.filename = Some(CString::new(filename.into()).unwrap()); ++ self ++ } ++ ++ /// Configure the `comment` field in the gzip header. ++ /// ++ /// # Panics ++ /// ++ /// Panics if the `comment` slice contains a zero. ++ pub fn comment>>(mut self, comment: T) -> GzBuilder { ++ self.comment = Some(CString::new(comment.into()).unwrap()); ++ self ++ } ++ ++ /// Consume this builder, creating a writer encoder in the process. ++ /// ++ /// The data written to the returned encoder will be compressed and then ++ /// written out to the supplied parameter `w`. ++ pub fn write(self, w: W, lvl: Compression) -> write::GzEncoder { ++ write::gz_encoder(self.into_header(lvl), w, lvl) ++ } ++ ++ /// Consume this builder, creating a reader encoder in the process. ++ /// ++ /// Data read from the returned encoder will be the compressed version of ++ /// the data read from the given reader. ++ pub fn read(self, r: R, lvl: Compression) -> read::GzEncoder { ++ read::gz_encoder(self.buf_read(BufReader::new(r), lvl)) ++ } ++ ++ /// Consume this builder, creating a reader encoder in the process. ++ /// ++ /// Data read from the returned encoder will be the compressed version of ++ /// the data read from the given reader. 
++ pub fn buf_read(self, r: R, lvl: Compression) -> bufread::GzEncoder ++ where ++ R: BufRead, ++ { ++ bufread::gz_encoder(self.into_header(lvl), r, lvl) ++ } ++ ++ fn into_header(self, lvl: Compression) -> Vec { ++ let GzBuilder { ++ extra, ++ filename, ++ comment, ++ operating_system, ++ mtime, ++ } = self; ++ let mut flg = 0; ++ let mut header = vec![0u8; 10]; ++ match extra { ++ Some(v) => { ++ flg |= FEXTRA; ++ header.push((v.len() >> 0) as u8); ++ header.push((v.len() >> 8) as u8); ++ header.extend(v); ++ } ++ None => {} ++ } ++ match filename { ++ Some(filename) => { ++ flg |= FNAME; ++ header.extend(filename.as_bytes_with_nul().iter().map(|x| *x)); ++ } ++ None => {} ++ } ++ match comment { ++ Some(comment) => { ++ flg |= FCOMMENT; ++ header.extend(comment.as_bytes_with_nul().iter().map(|x| *x)); ++ } ++ None => {} ++ } ++ header[0] = 0x1f; ++ header[1] = 0x8b; ++ header[2] = 8; ++ header[3] = flg; ++ header[4] = (mtime >> 0) as u8; ++ header[5] = (mtime >> 8) as u8; ++ header[6] = (mtime >> 16) as u8; ++ header[7] = (mtime >> 24) as u8; ++ header[8] = if lvl.0 >= Compression::best().0 { ++ 2 ++ } else if lvl.0 <= Compression::fast().0 { ++ 4 ++ } else { ++ 0 ++ }; ++ ++ // Typically this byte indicates what OS the gz stream was created on, ++ // but in an effort to have cross-platform reproducible streams just ++ // default this value to 255. I'm not sure that if we "correctly" set ++ // this it'd do anything anyway... ++ header[9] = operating_system.unwrap_or(255); ++ return header; ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::io::prelude::*; ++ ++ use super::{read, write, GzBuilder}; ++ use Compression; ++ use rand::{thread_rng, Rng}; ++ ++ #[test] ++ fn roundtrip() { ++ let mut e = write::GzEncoder::new(Vec::new(), Compression::default()); ++ e.write_all(b"foo bar baz").unwrap(); ++ let inner = e.finish().unwrap(); ++ let mut d = read::GzDecoder::new(&inner[..]); ++ let mut s = String::new(); ++ d.read_to_string(&mut s).unwrap(); ++ assert_eq!(s, "foo bar baz"); ++ } ++ ++ #[test] ++ fn roundtrip_zero() { ++ let e = write::GzEncoder::new(Vec::new(), Compression::default()); ++ let inner = e.finish().unwrap(); ++ let mut d = read::GzDecoder::new(&inner[..]); ++ let mut s = String::new(); ++ d.read_to_string(&mut s).unwrap(); ++ assert_eq!(s, ""); ++ } ++ ++ #[test] ++ fn roundtrip_big() { ++ let mut real = Vec::new(); ++ let mut w = write::GzEncoder::new(Vec::new(), Compression::default()); ++ let v = ::random_bytes().take(1024).collect::>(); ++ for _ in 0..200 { ++ let to_write = &v[..thread_rng().gen_range(0, v.len())]; ++ real.extend(to_write.iter().map(|x| *x)); ++ w.write_all(to_write).unwrap(); ++ } ++ let result = w.finish().unwrap(); ++ let mut r = read::GzDecoder::new(&result[..]); ++ let mut v = Vec::new(); ++ r.read_to_end(&mut v).unwrap(); ++ assert!(v == real); ++ } ++ ++ #[test] ++ fn roundtrip_big2() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let mut r = read::GzDecoder::new(read::GzEncoder::new(&v[..], Compression::default())); ++ let mut res = Vec::new(); ++ r.read_to_end(&mut res).unwrap(); ++ assert!(res == v); ++ } ++ ++ #[test] ++ fn fields() { ++ let r = vec![0, 2, 4, 6]; ++ let e = GzBuilder::new() ++ .filename("foo.rs") ++ .comment("bar") ++ .extra(vec![0, 1, 2, 3]) ++ .read(&r[..], Compression::default()); ++ let mut d = read::GzDecoder::new(e); ++ assert_eq!(d.header().unwrap().filename(), Some(&b"foo.rs"[..])); ++ assert_eq!(d.header().unwrap().comment(), Some(&b"bar"[..])); ++ assert_eq!(d.header().unwrap().extra(), 
Some(&b"\x00\x01\x02\x03"[..])); ++ let mut res = Vec::new(); ++ d.read_to_end(&mut res).unwrap(); ++ assert_eq!(res, vec![0, 2, 4, 6]); ++ } ++ ++ #[test] ++ fn keep_reading_after_end() { ++ let mut e = write::GzEncoder::new(Vec::new(), Compression::default()); ++ e.write_all(b"foo bar baz").unwrap(); ++ let inner = e.finish().unwrap(); ++ let mut d = read::GzDecoder::new(&inner[..]); ++ let mut s = String::new(); ++ d.read_to_string(&mut s).unwrap(); ++ assert_eq!(s, "foo bar baz"); ++ d.read_to_string(&mut s).unwrap(); ++ assert_eq!(s, "foo bar baz"); ++ } ++ ++ #[test] ++ fn qc_reader() { ++ ::quickcheck::quickcheck(test as fn(_) -> _); ++ ++ fn test(v: Vec) -> bool { ++ let r = read::GzEncoder::new(&v[..], Compression::default()); ++ let mut r = read::GzDecoder::new(r); ++ let mut v2 = Vec::new(); ++ r.read_to_end(&mut v2).unwrap(); ++ v == v2 ++ } ++ } ++ ++ #[test] ++ fn flush_after_write() { ++ let mut f = write::GzEncoder::new(Vec::new(), Compression::default()); ++ write!(f, "Hello world").unwrap(); ++ f.flush().unwrap(); ++ } ++} diff --cc vendor/flate2-1.0.2/src/gz/read.rs index 000000000,000000000..0ffb9f6a4 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/gz/read.rs @@@ -1,0 -1,0 +1,278 @@@ ++use std::io::prelude::*; ++use std::io; ++ ++use super::{GzBuilder, GzHeader}; ++use Compression; ++use bufreader::BufReader; ++use super::bufread; ++ ++/// A gzip streaming encoder ++/// ++/// This structure exposes a [`Read`] interface that will read uncompressed data ++/// from the underlying reader and expose the compressed version as a [`Read`] ++/// interface. ++/// ++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// use flate2::Compression; ++/// use flate2::read::GzEncoder; ++/// ++/// // Return a vector containing the GZ compressed version of hello world ++/// ++/// fn gzencode_hello_world() -> io::Result> { ++/// let mut ret_vec = [0;100]; ++/// let bytestring = b"hello world"; ++/// let mut gz = GzEncoder::new(&bytestring[..], Compression::fast()); ++/// let count = gz.read(&mut ret_vec)?; ++/// Ok(ret_vec[0..count].to_vec()) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct GzEncoder { ++ inner: bufread::GzEncoder>, ++} ++ ++pub fn gz_encoder(inner: bufread::GzEncoder>) -> GzEncoder { ++ GzEncoder { inner: inner } ++} ++ ++impl GzEncoder { ++ /// Creates a new encoder which will use the given compression level. ++ /// ++ /// The encoder is not configured specially for the emitted header. For ++ /// header configuration, see the `GzBuilder` type. ++ /// ++ /// The data read from the stream `r` will be compressed and available ++ /// through the returned reader. ++ pub fn new(r: R, level: Compression) -> GzEncoder { ++ GzBuilder::new().read(r, level) ++ } ++} ++ ++impl GzEncoder { ++ /// Acquires a reference to the underlying reader. ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying reader. ++ /// ++ /// Note that mutation of the reader may result in surprising results if ++ /// this encoder is continued to be used. 
++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Returns the underlying stream, consuming this encoder ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++} ++ ++impl Read for GzEncoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ self.inner.read(into) ++ } ++} ++ ++impl Write for GzEncoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++/// A gzip streaming decoder ++/// ++/// This structure exposes a [`Read`] interface that will consume compressed ++/// data from the underlying reader and emit uncompressed data. ++/// ++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html ++/// ++/// # Examples ++/// ++/// ``` ++/// ++/// use std::io::prelude::*; ++/// use std::io; ++/// # use flate2::Compression; ++/// # use flate2::write::GzEncoder; ++/// use flate2::read::GzDecoder; ++/// ++/// # fn main() { ++/// # let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++/// # e.write_all(b"Hello World").unwrap(); ++/// # let bytes = e.finish().unwrap(); ++/// # println!("{}", decode_reader(bytes).unwrap()); ++/// # } ++/// # ++/// // Uncompresses a Gz Encoded vector of bytes and returns a string or error ++/// // Here &[u8] implements Read ++/// ++/// fn decode_reader(bytes: Vec) -> io::Result { ++/// let mut gz = GzDecoder::new(&bytes[..]); ++/// let mut s = String::new(); ++/// gz.read_to_string(&mut s)?; ++/// Ok(s) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct GzDecoder { ++ inner: bufread::GzDecoder>, ++} ++ ++impl GzDecoder { ++ /// Creates a new decoder from the given reader, immediately parsing the ++ /// gzip header. ++ pub fn new(r: R) -> GzDecoder { ++ GzDecoder { ++ inner: bufread::GzDecoder::new(BufReader::new(r)), ++ } ++ } ++} ++ ++impl GzDecoder { ++ /// Returns the header associated with this stream, if it was valid. ++ pub fn header(&self) -> Option<&GzHeader> { ++ self.inner.header() ++ } ++ ++ /// Acquires a reference to the underlying reader. ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying stream. ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Consumes this decoder, returning the underlying reader. ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++} ++ ++impl Read for GzDecoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ self.inner.read(into) ++ } ++} ++ ++impl Write for GzDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++/// A gzip streaming decoder that decodes all members of a multistream ++/// ++/// A gzip member consists of a header, compressed data and a trailer. The [gzip ++/// specification](https://tools.ietf.org/html/rfc1952), however, allows multiple ++/// gzip members to be joined in a single stream. `MultiGzDecoder` will ++/// decode all consecutive members while `GzDecoder` will only decompress the ++/// first gzip member. The multistream format is commonly used in bioinformatics, ++/// for example when using the BGZF compressed data. 
++/// ++/// This structure exposes a [`Read`] interface that will consume all gzip members ++/// from the underlying reader and emit uncompressed data. ++/// ++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// # use flate2::Compression; ++/// # use flate2::write::GzEncoder; ++/// use flate2::read::MultiGzDecoder; ++/// ++/// # fn main() { ++/// # let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++/// # e.write_all(b"Hello World").unwrap(); ++/// # let bytes = e.finish().unwrap(); ++/// # println!("{}", decode_reader(bytes).unwrap()); ++/// # } ++/// # ++/// // Uncompresses a Gz Encoded vector of bytes and returns a string or error ++/// // Here &[u8] implements Read ++/// ++/// fn decode_reader(bytes: Vec) -> io::Result { ++/// let mut gz = MultiGzDecoder::new(&bytes[..]); ++/// let mut s = String::new(); ++/// gz.read_to_string(&mut s)?; ++/// Ok(s) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct MultiGzDecoder { ++ inner: bufread::MultiGzDecoder>, ++} ++ ++impl MultiGzDecoder { ++ /// Creates a new decoder from the given reader, immediately parsing the ++ /// (first) gzip header. If the gzip stream contains multiple members all will ++ /// be decoded. ++ pub fn new(r: R) -> MultiGzDecoder { ++ MultiGzDecoder { ++ inner: bufread::MultiGzDecoder::new(BufReader::new(r)), ++ } ++ } ++} ++ ++impl MultiGzDecoder { ++ /// Returns the current header associated with this stream, if it's valid. ++ pub fn header(&self) -> Option<&GzHeader> { ++ self.inner.header() ++ } ++ ++ /// Acquires a reference to the underlying reader. ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying stream. ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Consumes this decoder, returning the underlying reader. ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++} ++ ++impl Read for MultiGzDecoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ self.inner.read(into) ++ } ++} ++ ++impl Write for MultiGzDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} diff --cc vendor/flate2-1.0.2/src/gz/write.rs index 000000000,000000000..39f24a6a1 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/gz/write.rs @@@ -1,0 -1,0 +1,480 @@@ ++use std::cmp; ++use std::io; ++use std::io::prelude::*; ++ ++#[cfg(feature = "tokio")] ++use futures::Poll; ++#[cfg(feature = "tokio")] ++use tokio_io::{AsyncRead, AsyncWrite}; ++ ++use super::bufread::{corrupt, read_gz_header}; ++use super::{GzBuilder, GzHeader}; ++use crc::{Crc, CrcWriter}; ++use zio; ++use {Compress, Compression, Decompress, Status}; ++ ++/// A gzip streaming encoder ++/// ++/// This structure exposes a [`Write`] interface that will emit compressed data ++/// to the underlying writer `W`. 
++/// ++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use flate2::Compression; ++/// use flate2::write::GzEncoder; ++/// ++/// // Vec implements Write to print the compressed bytes of sample string ++/// # fn main() { ++/// ++/// let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++/// e.write_all(b"Hello World").unwrap(); ++/// println!("{:?}", e.finish().unwrap()); ++/// # } ++/// ``` ++#[derive(Debug)] ++pub struct GzEncoder { ++ inner: zio::Writer, ++ crc: Crc, ++ crc_bytes_written: usize, ++ header: Vec, ++} ++ ++pub fn gz_encoder(header: Vec, w: W, lvl: Compression) -> GzEncoder { ++ GzEncoder { ++ inner: zio::Writer::new(w, Compress::new(lvl, false)), ++ crc: Crc::new(), ++ header: header, ++ crc_bytes_written: 0, ++ } ++} ++ ++impl GzEncoder { ++ /// Creates a new encoder which will use the given compression level. ++ /// ++ /// The encoder is not configured specially for the emitted header. For ++ /// header configuration, see the `GzBuilder` type. ++ /// ++ /// The data written to the returned encoder will be compressed and then ++ /// written to the stream `w`. ++ pub fn new(w: W, level: Compression) -> GzEncoder { ++ GzBuilder::new().write(w, level) ++ } ++ ++ /// Acquires a reference to the underlying writer. ++ pub fn get_ref(&self) -> &W { ++ self.inner.get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying writer. ++ /// ++ /// Note that mutation of the writer may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut W { ++ self.inner.get_mut() ++ } ++ ++ /// Attempt to finish this output stream, writing out final chunks of data. ++ /// ++ /// Note that this function can only be used once data has finished being ++ /// written to the output stream. After this function is called then further ++ /// calls to `write` may result in a panic. ++ /// ++ /// # Panics ++ /// ++ /// Attempts to write data to this stream may result in a panic after this ++ /// function is called. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to complete this stream, and any I/O ++ /// errors which occur will be returned from this function. ++ pub fn try_finish(&mut self) -> io::Result<()> { ++ self.write_header()?; ++ self.inner.finish()?; ++ ++ while self.crc_bytes_written < 8 { ++ let (sum, amt) = (self.crc.sum() as u32, self.crc.amount()); ++ let buf = [ ++ (sum >> 0) as u8, ++ (sum >> 8) as u8, ++ (sum >> 16) as u8, ++ (sum >> 24) as u8, ++ (amt >> 0) as u8, ++ (amt >> 8) as u8, ++ (amt >> 16) as u8, ++ (amt >> 24) as u8, ++ ]; ++ let inner = self.inner.get_mut(); ++ let n = inner.write(&buf[self.crc_bytes_written..])?; ++ self.crc_bytes_written += n; ++ } ++ Ok(()) ++ } ++ ++ /// Finish encoding this stream, returning the underlying writer once the ++ /// encoding is done. ++ /// ++ /// Note that this function may not be suitable to call in a situation where ++ /// the underlying stream is an asynchronous I/O stream. To finish a stream ++ /// the `try_finish` (or `shutdown`) method should be used instead. To ++ /// re-acquire ownership of a stream it is safe to call this method after ++ /// `try_finish` or `shutdown` has returned `Ok`. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to complete this stream, and any I/O ++ /// errors which occur will be returned from this function. 
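One consequence of the `try_finish`/`finish` split documented above: `try_finish` completes the stream in place (remaining header bytes, deflate data, then the 8-byte trailer), after which the encoder can still be inspected, and `finish` merely hands back the writer. A sketch, not part of the vendored source; the `18` below is the minimal header-plus-trailer size of the gzip format, an assumption of the sketch rather than an API guarantee:

```rust
use std::io::prelude::*;
use flate2::write::GzEncoder;
use flate2::Compression;

fn finish_in_place() -> std::io::Result<Vec<u8>> {
    let mut enc = GzEncoder::new(Vec::new(), Compression::default());
    enc.write_all(b"payload")?;
    enc.try_finish()?;           // stream is now complete...
    let n = enc.get_ref().len(); // ...and the output can still be inspected
    assert!(n >= 18);            // 10-byte header + deflate data + 8-byte trailer
    enc.finish()                 // safe after try_finish returned Ok
}
```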
++ pub fn finish(mut self) -> io::Result { ++ self.try_finish()?; ++ Ok(self.inner.take_inner()) ++ } ++ ++ fn write_header(&mut self) -> io::Result<()> { ++ while self.header.len() > 0 { ++ let n = self.inner.get_mut().write(&self.header)?; ++ self.header.drain(..n); ++ } ++ Ok(()) ++ } ++} ++ ++impl Write for GzEncoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ assert_eq!(self.crc_bytes_written, 0); ++ self.write_header()?; ++ let n = self.inner.write(buf)?; ++ self.crc.update(&buf[..n]); ++ Ok(n) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ assert_eq!(self.crc_bytes_written, 0); ++ self.write_header()?; ++ self.inner.flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for GzEncoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ try_nb!(self.try_finish()); ++ self.get_mut().shutdown() ++ } ++} ++ ++impl Read for GzEncoder { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ self.get_mut().read(buf) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for GzEncoder {} ++ ++impl Drop for GzEncoder { ++ fn drop(&mut self) { ++ if self.inner.is_present() { ++ let _ = self.try_finish(); ++ } ++ } ++} ++ ++/// A gzip streaming decoder ++/// ++/// This structure exposes a [`Write`] interface that will emit compressed data ++/// to the underlying writer `W`. ++/// ++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// use flate2::Compression; ++/// use flate2::write::{GzEncoder, GzDecoder}; ++/// ++/// # fn main() { ++/// # let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++/// # e.write(b"Hello World").unwrap(); ++/// # let bytes = e.finish().unwrap(); ++/// # assert_eq!("Hello World", decode_writer(bytes).unwrap()); ++/// # } ++/// // Uncompresses a gzip encoded vector of bytes and returns a string or error ++/// // Here Vec implements Write ++/// fn decode_writer(bytes: Vec) -> io::Result { ++/// let mut writer = Vec::new(); ++/// let mut decoder = GzDecoder::new(writer); ++/// decoder.write_all(&bytes[..])?; ++/// writer = decoder.finish()?; ++/// let return_string = String::from_utf8(writer).expect("String parsing error"); ++/// Ok(return_string) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct GzDecoder { ++ inner: zio::Writer, Decompress>, ++ crc_bytes: Vec, ++ header: Option, ++ header_buf: Vec, ++} ++ ++const CRC_BYTES_LEN: usize = 8; ++ ++impl GzDecoder { ++ /// Creates a new decoder which will write uncompressed data to the stream. ++ /// ++ /// When this encoder is dropped or unwrapped the final pieces of data will ++ /// be flushed. ++ pub fn new(w: W) -> GzDecoder { ++ GzDecoder { ++ inner: zio::Writer::new(CrcWriter::new(w), Decompress::new(false)), ++ crc_bytes: Vec::with_capacity(CRC_BYTES_LEN), ++ header: None, ++ header_buf: Vec::new(), ++ } ++ } ++ ++ /// Returns the header associated with this stream. ++ pub fn header(&self) -> Option<&GzHeader> { ++ self.header.as_ref() ++ } ++ ++ /// Acquires a reference to the underlying writer. ++ pub fn get_ref(&self) -> &W { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying writer. ++ /// ++ /// Note that mutating the output/input state of the stream may corrupt this ++ /// object, so care must be taken when using this method. ++ pub fn get_mut(&mut self) -> &mut W { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Attempt to finish this output stream, writing out final chunks of data. 
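The `Drop` implementation above makes a best-effort `try_finish` but deliberately discards its error (`let _ = ...`). A brief sketch, not part of the vendored source, of why calling it explicitly matters; `sink` is an arbitrary caller-supplied writer:

```rust
use std::io::prelude::*;
use flate2::write::GzEncoder;
use flate2::Compression;

/// Finishing explicitly is the only way to observe trailing I/O errors;
/// merely dropping `enc` would swallow them.
fn finish_explicitly<W: Write>(sink: W) -> std::io::Result<()> {
    let mut enc = GzEncoder::new(sink, Compression::default());
    enc.write_all(b"data")?;
    enc.try_finish()
}
```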
++ /// ++ /// Note that this function can only be used once data has finished being ++ /// written to the output stream. After this function is called then further ++ /// calls to `write` may result in a panic. ++ /// ++ /// # Panics ++ /// ++ /// Attempts to write data to this stream may result in a panic after this ++ /// function is called. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to finish the stream, returning any ++ /// errors which happen. ++ pub fn try_finish(&mut self) -> io::Result<()> { ++ self.finish_and_check_crc()?; ++ Ok(()) ++ } ++ ++ /// Consumes this decoder, flushing the output stream. ++ /// ++ /// This will flush the underlying data stream and then return the contained ++ /// writer if the flush succeeded. ++ /// ++ /// Note that this function may not be suitable to call in a situation where ++ /// the underlying stream is an asynchronous I/O stream. To finish a stream ++ /// the `try_finish` (or `shutdown`) method should be used instead. To ++ /// re-acquire ownership of a stream it is safe to call this method after ++ /// `try_finish` or `shutdown` has returned `Ok`. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to complete this stream, and any I/O ++ /// errors which occur will be returned from this function. ++ pub fn finish(mut self) -> io::Result { ++ self.finish_and_check_crc()?; ++ Ok(self.inner.take_inner().into_inner()) ++ } ++ ++ fn finish_and_check_crc(&mut self) -> io::Result<()> { ++ self.inner.finish()?; ++ ++ if self.crc_bytes.len() != 8 { ++ return Err(corrupt()); ++ } ++ ++ let crc = ((self.crc_bytes[0] as u32) << 0) ++ | ((self.crc_bytes[1] as u32) << 8) ++ | ((self.crc_bytes[2] as u32) << 16) ++ | ((self.crc_bytes[3] as u32) << 24); ++ let amt = ((self.crc_bytes[4] as u32) << 0) ++ | ((self.crc_bytes[5] as u32) << 8) ++ | ((self.crc_bytes[6] as u32) << 16) ++ | ((self.crc_bytes[7] as u32) << 24); ++ if crc != self.inner.get_ref().crc().sum() as u32 { ++ return Err(corrupt()); ++ } ++ if amt != self.inner.get_ref().crc().amount() { ++ return Err(corrupt()); ++ } ++ Ok(()) ++ } ++} ++ ++struct Counter { ++ inner: T, ++ pos: usize, ++} ++ ++impl Read for Counter { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ let pos = self.inner.read(buf)?; ++ self.pos += pos; ++ Ok(pos) ++ } ++} ++ ++impl Write for GzDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ if self.header.is_none() { ++ // trying to avoid buffer usage ++ let (res, pos) = { ++ let mut counter = Counter { ++ inner: self.header_buf.chain(buf), ++ pos: 0, ++ }; ++ let res = read_gz_header(&mut counter); ++ (res, counter.pos) ++ }; ++ ++ match res { ++ Err(err) => { ++ if err.kind() == io::ErrorKind::UnexpectedEof { ++ // not enough data for header, save to the buffer ++ self.header_buf.extend(buf); ++ Ok(buf.len()) ++ } else { ++ Err(err) ++ } ++ } ++ Ok(header) => { ++ self.header = Some(header); ++ let pos = pos - self.header_buf.len(); ++ self.header_buf.truncate(0); ++ Ok(pos) ++ } ++ } ++ } else { ++ let (n, status) = self.inner.write_with_status(buf)?; ++ ++ if status == Status::StreamEnd { ++ if n < buf.len() && self.crc_bytes.len() < 8 { ++ let remaining = buf.len() - n; ++ let crc_bytes = cmp::min(remaining, CRC_BYTES_LEN - self.crc_bytes.len()); ++ self.crc_bytes.extend(&buf[n..n + crc_bytes]); ++ return Ok(n + crc_bytes); ++ } ++ } ++ Ok(n) ++ } ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.inner.flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for GzDecoder { ++ fn shutdown(&mut 
self) -> Poll<(), io::Error> { ++ try_nb!(self.try_finish()); ++ self.inner.get_mut().get_mut().shutdown() ++ } ++} ++ ++impl Read for GzDecoder { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ self.inner.get_mut().get_mut().read(buf) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for GzDecoder {} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ ++ const STR: &'static str = "Hello World Hello World Hello World Hello World Hello World \ ++ Hello World Hello World Hello World Hello World Hello World \ ++ Hello World Hello World Hello World Hello World Hello World \ ++ Hello World Hello World Hello World Hello World Hello World \ ++ Hello World Hello World Hello World Hello World Hello World"; ++ ++ #[test] ++ fn decode_writer_one_chunk() { ++ let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++ e.write(STR.as_ref()).unwrap(); ++ let bytes = e.finish().unwrap(); ++ ++ let mut writer = Vec::new(); ++ let mut decoder = GzDecoder::new(writer); ++ let n = decoder.write(&bytes[..]).unwrap(); ++ decoder.write(&bytes[n..]).unwrap(); ++ decoder.try_finish().unwrap(); ++ writer = decoder.finish().unwrap(); ++ let return_string = String::from_utf8(writer).expect("String parsing error"); ++ assert_eq!(return_string, STR); ++ } ++ ++ #[test] ++ fn decode_writer_partial_header() { ++ let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++ e.write(STR.as_ref()).unwrap(); ++ let bytes = e.finish().unwrap(); ++ ++ let mut writer = Vec::new(); ++ let mut decoder = GzDecoder::new(writer); ++ assert_eq!(decoder.write(&bytes[..5]).unwrap(), 5); ++ let n = decoder.write(&bytes[5..]).unwrap(); ++ if n < bytes.len() - 5 { ++ decoder.write(&bytes[n + 5..]).unwrap(); ++ } ++ writer = decoder.finish().unwrap(); ++ let return_string = String::from_utf8(writer).expect("String parsing error"); ++ assert_eq!(return_string, STR); ++ } ++ ++ #[test] ++ fn decode_writer_exact_header() { ++ let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++ e.write(STR.as_ref()).unwrap(); ++ let bytes = e.finish().unwrap(); ++ ++ let mut writer = Vec::new(); ++ let mut decoder = GzDecoder::new(writer); ++ assert_eq!(decoder.write(&bytes[..10]).unwrap(), 10); ++ decoder.write(&bytes[10..]).unwrap(); ++ writer = decoder.finish().unwrap(); ++ let return_string = String::from_utf8(writer).expect("String parsing error"); ++ assert_eq!(return_string, STR); ++ } ++ ++ #[test] ++ fn decode_writer_partial_crc() { ++ let mut e = GzEncoder::new(Vec::new(), Compression::default()); ++ e.write(STR.as_ref()).unwrap(); ++ let bytes = e.finish().unwrap(); ++ ++ let mut writer = Vec::new(); ++ let mut decoder = GzDecoder::new(writer); ++ let l = bytes.len() - 5; ++ let n = decoder.write(&bytes[..l]).unwrap(); ++ decoder.write(&bytes[n..]).unwrap(); ++ writer = decoder.finish().unwrap(); ++ let return_string = String::from_utf8(writer).expect("String parsing error"); ++ assert_eq!(return_string, STR); ++ } ++ ++} diff --cc vendor/flate2-1.0.2/src/lib.rs index 000000000,000000000..810dc090d new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/lib.rs @@@ -1,0 -1,0 +1,214 @@@ ++//! A DEFLATE-based stream compression/decompression library ++//! ++//! This library is meant to supplement/replace the ++//! `flate` library that was previously part of the standard rust distribution ++//! providing a streaming encoder/decoder rather than purely ++//! an in-memory encoder/decoder. ++//! ++//! Like with [`flate`], flate2 is based on [`miniz.c`][1] ++//! ++//! 
[1]: https://github.com/richgel999/miniz ++//! [`flate`]: https://github.com/rust-lang/rust/tree/1.19.0/src/libflate ++//! ++//! # Organization ++//! ++//! This crate consists mainly of three modules, [`read`], [`write`], and ++//! [`bufread`]. Each module contains a number of types used to encode and ++//! decode various streams of data. All types in the [`write`] module work on ++//! instances of [`Write`][write], whereas all types in the [`read`] module work on ++//! instances of [`Read`][read] and [`bufread`] works with [`BufRead`][bufread]. ++//! ++//! ``` ++//! use flate2::write::GzEncoder; ++//! use flate2::Compression; ++//! use std::io; ++//! use std::io::prelude::*; ++//! ++//! # fn main() { let _ = run(); } ++//! # fn run() -> io::Result<()> { ++//! let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); ++//! encoder.write_all(b"Example")?; ++//! # Ok(()) ++//! # } ++//! ``` ++//! ++//! ++//! Other various types are provided at the top-level of the crate for ++//! management and dealing with encoders/decoders. Also note that types which ++//! operate over a specific trait often implement the mirroring trait as well. ++//! For example a `flate2::read::DeflateDecoder` *also* implements the ++//! `Write` trait if `T: Write`. That is, the "dual trait" is forwarded directly ++//! to the underlying object if available. ++//! ++//! [`read`]: read/index.html ++//! [`bufread`]: bufread/index.html ++//! [`write`]: write/index.html ++//! [read]: https://doc.rust-lang.org/std/io/trait.Read.html ++//! [write]: https://doc.rust-lang.org/std/io/trait.Write.html ++//! [bufread]: https://doc.rust-lang.org/std/io/trait.BufRead.html ++//! ++//! # Async I/O ++//! ++//! This crate optionally can support async I/O streams with the [Tokio stack] via ++//! the `tokio` feature of this crate: ++//! ++//! [Tokio stack]: https://tokio.rs/ ++//! ++//! ```toml ++//! flate2 = { version = "0.2", features = ["tokio"] } ++//! ``` ++//! ++//! All methods are internally capable of working with streams that may return ++//! [`ErrorKind::WouldBlock`] when they're not ready to perform the particular ++//! operation. ++//! ++//! [`ErrorKind::WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html ++//! ++//! Note that care needs to be taken when using these objects, however. The ++//! Tokio runtime, in particular, requires that data is fully flushed before ++//! dropping streams. For compatibility with blocking streams all streams are ++//! flushed/written when they are dropped, and this is not always a suitable ++//! time to perform I/O. If I/O streams are flushed before drop, however, then ++//! these operations will be a noop. ++#![doc(html_root_url = "https://docs.rs/flate2/0.2")] ++#![deny(missing_docs)] ++#![deny(missing_debug_implementations)] ++#![allow(trivial_numeric_casts)] ++#![cfg_attr(test, deny(warnings))] ++ ++#[cfg(feature = "tokio")] ++extern crate futures; ++extern crate libc; ++#[cfg(test)] ++extern crate quickcheck; ++#[cfg(test)] ++extern crate rand; ++#[cfg(feature = "tokio")] ++#[macro_use] ++extern crate tokio_io; ++ ++pub use gz::GzBuilder; ++pub use gz::GzHeader; ++pub use mem::{Compress, CompressError, Decompress, DecompressError, Status}; ++pub use mem::{FlushCompress, FlushDecompress}; ++pub use crc::{Crc, CrcReader, CrcWriter}; ++ ++mod bufreader; ++mod crc; ++mod deflate; ++mod ffi; ++mod gz; ++mod zio; ++mod mem; ++mod zlib; ++ ++/// Types which operate over [`Read`] streams, both encoders and decoders for ++/// various formats. 
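As the module organization notes above suggest, the `read` adapters differ from their `bufread` counterparts only in supplying their own internal buffer. When the caller already has a buffered reader, using the `bufread` type avoids buffering twice. A sketch, not part of the vendored source:

```rust
use std::fs::File;
use std::io::BufReader;
use flate2::bufread::GzDecoder;

/// Opens a gzip file without double buffering: BufReader supplies the
/// BufRead that the bufread-flavored decoder expects.
fn open_gz(path: &str) -> std::io::Result<GzDecoder<BufReader<File>>> {
    let file = File::open(path)?;
    Ok(GzDecoder::new(BufReader::new(file)))
}
```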
++/// ++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html ++pub mod read { ++ pub use deflate::read::DeflateEncoder; ++ pub use deflate::read::DeflateDecoder; ++ pub use zlib::read::ZlibEncoder; ++ pub use zlib::read::ZlibDecoder; ++ pub use gz::read::GzEncoder; ++ pub use gz::read::GzDecoder; ++ pub use gz::read::MultiGzDecoder; ++} ++ ++/// Types which operate over [`Write`] streams, both encoders and decoders for ++/// various formats. ++/// ++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html ++pub mod write { ++ pub use deflate::write::DeflateEncoder; ++ pub use deflate::write::DeflateDecoder; ++ pub use zlib::write::ZlibEncoder; ++ pub use zlib::write::ZlibDecoder; ++ pub use gz::write::GzEncoder; ++ pub use gz::write::GzDecoder; ++} ++ ++/// Types which operate over [`BufRead`] streams, both encoders and decoders for ++/// various formats. ++/// ++/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html ++pub mod bufread { ++ pub use deflate::bufread::DeflateEncoder; ++ pub use deflate::bufread::DeflateDecoder; ++ pub use zlib::bufread::ZlibEncoder; ++ pub use zlib::bufread::ZlibDecoder; ++ pub use gz::bufread::GzEncoder; ++ pub use gz::bufread::GzDecoder; ++ pub use gz::bufread::MultiGzDecoder; ++} ++ ++fn _assert_send_sync() { ++ fn _assert_send_sync() {} ++ ++ _assert_send_sync::>(); ++ _assert_send_sync::>(); ++ _assert_send_sync::>(); ++ _assert_send_sync::>(); ++ _assert_send_sync::>(); ++ _assert_send_sync::>(); ++ _assert_send_sync::>(); ++ _assert_send_sync::>>(); ++ _assert_send_sync::>>(); ++ _assert_send_sync::>>(); ++ _assert_send_sync::>>(); ++ _assert_send_sync::>>(); ++ _assert_send_sync::>>(); ++} ++ ++/// When compressing data, the compression level can be specified by a value in ++/// this enum. ++#[derive(Copy, Clone, PartialEq, Eq, Debug)] ++pub struct Compression(u32); ++ ++impl Compression { ++ /// Creates a new description of the compression level with an explicitly ++ /// specified integer. ++ /// ++ /// The integer here is typically on a scale of 0-9 where 0 means "no ++ /// compression" and 9 means "take as long as you'd like". ++ pub fn new(level: u32) -> Compression { ++ Compression(level) ++ } ++ ++ /// No compression is to be performed, this may actually inflate data ++ /// slightly when encoding. ++ pub fn none() -> Compression { ++ Compression(0) ++ } ++ ++ /// Optimize for the best speed of encoding. ++ pub fn fast() -> Compression { ++ Compression(1) ++ } ++ ++ /// Optimize for the size of data being encoded. ++ pub fn best() -> Compression { ++ Compression(9) ++ } ++ ++ /// Returns an integer representing the compression level, typically on a ++ /// scale of 0-9 ++ pub fn level(&self) -> u32 { ++ self.0 ++ } ++} ++ ++impl Default for Compression { ++ fn default() -> Compression { ++ Compression(6) ++ } ++} ++ ++#[cfg(test)] ++fn random_bytes() -> impl Iterator { ++ use std::iter; ++ use rand::Rng; ++ ++ iter::repeat(()).map(|_| rand::thread_rng().gen()) ++} diff --cc vendor/flate2-1.0.2/src/mem.rs index 000000000,000000000..91c606aaf new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/mem.rs @@@ -1,0 -1,0 +1,737 @@@ ++use std::error::Error; ++use std::fmt; ++use std::io; ++use std::marker; ++use std::slice; ++ ++use libc::{c_int, c_uint}; ++ ++use ffi; ++use Compression; ++ ++/// Raw in-memory compression stream for blocks of data. ++/// ++/// This type is the building block for the I/O streams in the rest of this ++/// crate. 
It requires more management than the [`Read`]/[`Write`] API but is
++/// maximally flexible in terms of accepting input from any source and being
++/// able to produce output to any memory location.
++///
++/// It is recommended to use the I/O stream adaptors over this type as they're
++/// easier to use.
++///
++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
++#[derive(Debug)]
++pub struct Compress {
++    inner: Stream<DirCompress>,
++}
++
++/// Raw in-memory decompression stream for blocks of data.
++///
++/// This type is the building block for the I/O streams in the rest of this
++/// crate. It requires more management than the [`Read`]/[`Write`] API but is
++/// maximally flexible in terms of accepting input from any source and being
++/// able to produce output to any memory location.
++///
++/// It is recommended to use the I/O stream adaptors over this type as they're
++/// easier to use.
++///
++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
++#[derive(Debug)]
++pub struct Decompress {
++    inner: Stream<DirDecompress>,
++}
++
++#[derive(Debug)]
++struct Stream<D: Direction> {
++    stream_wrapper: ffi::StreamWrapper,
++    total_in: u64,
++    total_out: u64,
++    _marker: marker::PhantomData<D>,
++}
++
++unsafe impl<D: Direction> Send for Stream<D> {}
++unsafe impl<D: Direction> Sync for Stream<D> {}
++
++trait Direction {
++    unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int;
++}
++
++#[derive(Debug)]
++enum DirCompress {}
++#[derive(Debug)]
++enum DirDecompress {}
++
++#[derive(Copy, Clone, PartialEq, Eq, Debug)]
++/// Values which indicate the form of flushing to be used when compressing
++/// in-memory data.
++pub enum FlushCompress {
++    /// A typical parameter for passing to compression/decompression functions,
++    /// this indicates that the underlying stream should decide how much data to
++    /// accumulate before producing output in order to maximize compression.
++    None = ffi::MZ_NO_FLUSH as isize,
++
++    /// All pending output is flushed to the output buffer and the output is
++    /// aligned on a byte boundary so that the decompressor can get all input
++    /// data available so far.
++    ///
++    /// Flushing may degrade compression for some compression algorithms and so
++    /// it should only be used when necessary. This will complete the current
++    /// deflate block and follow it with an empty stored block.
++    Sync = ffi::MZ_SYNC_FLUSH as isize,
++
++    /// All pending output is flushed to the output buffer, but the output is
++    /// not aligned to a byte boundary.
++    ///
++    /// All of the input data so far will be available to the decompressor (as
++    /// with `Flush::Sync`). This completes the current deflate block and follows
++    /// it with an empty fixed codes block that is 10 bits long, and it assures
++    /// that enough bytes are output in order for the decompressor to finish the
++    /// block before the empty fixed code block.
++    Partial = ffi::MZ_PARTIAL_FLUSH as isize,
++
++    /// All output is flushed as with `Flush::Sync` and the compression state is
++    /// reset so decompression can restart from this point if previous
++    /// compressed data has been damaged or if random access is desired.
++    ///
++    /// Using this option too often can seriously degrade compression.
++    Full = ffi::MZ_FULL_FLUSH as isize,
++
++    /// Pending input is processed and pending output is flushed.
++    ///
++    /// The return value may indicate that the stream is not yet done and more
++    /// data has yet to be processed.
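A sketch of the flush distinction documented above, not part of the vendored source: `FlushCompress::Sync` forces everything produced so far onto a byte boundary so a decompressor can consume it immediately, trading some compression ratio. The `+ 64` slack below is an assumption sized for small inputs, not a guarantee of the API:

```rust
use flate2::{Compress, Compression, FlushCompress};

/// Compresses `input` and syncs the stream so the output is decodable as-is.
fn compress_and_sync(input: &[u8]) -> Vec<u8> {
    let mut c = Compress::new(Compression::default(), false);
    // `compress_vec` never grows the vector, so reserve room up front.
    let mut out = Vec::with_capacity(input.len() + 64);
    c.compress_vec(input, &mut out, FlushCompress::Sync).unwrap();
    out
}
```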
++    Finish = ffi::MZ_FINISH as isize,
++
++    #[doc(hidden)]
++    _Nonexhaustive,
++}
++
++#[derive(Copy, Clone, PartialEq, Eq, Debug)]
++/// Values which indicate the form of flushing to be used when
++/// decompressing in-memory data.
++pub enum FlushDecompress {
++    /// A typical parameter for passing to compression/decompression functions,
++    /// this indicates that the underlying stream should decide how much data to
++    /// accumulate before producing output in order to maximize compression.
++    None = ffi::MZ_NO_FLUSH as isize,
++
++    /// All pending output is flushed to the output buffer and the output is
++    /// aligned on a byte boundary so that the decompressor can get all input
++    /// data available so far.
++    ///
++    /// Flushing may degrade compression for some compression algorithms and so
++    /// it should only be used when necessary. This will complete the current
++    /// deflate block and follow it with an empty stored block.
++    Sync = ffi::MZ_SYNC_FLUSH as isize,
++
++    /// Pending input is processed and pending output is flushed.
++    ///
++    /// The return value may indicate that the stream is not yet done and more
++    /// data has yet to be processed.
++    Finish = ffi::MZ_FINISH as isize,
++
++    #[doc(hidden)]
++    _Nonexhaustive,
++}
++
++/// The inner state for an error when decompressing
++#[derive(Debug, Default)]
++struct DecompressErrorInner {
++    needs_dictionary: Option<u32>,
++}
++
++/// Error returned when a decompression object finds that the input stream of
++/// bytes was not a valid input stream of bytes.
++#[derive(Debug)]
++pub struct DecompressError(DecompressErrorInner);
++
++impl DecompressError {
++    /// Indicates whether decompression failed due to requiring a dictionary.
++    ///
++    /// The resulting integer is the Adler-32 checksum of the dictionary
++    /// required.
++    pub fn needs_dictionary(&self) -> Option<u32> {
++        self.0.needs_dictionary
++    }
++}
++
++/// Error returned when a compression object is used incorrectly or otherwise
++/// generates an error.
++#[derive(Debug)]
++pub struct CompressError(());
++
++/// Possible status results of compressing some data or successfully
++/// decompressing a block of data.
++#[derive(Copy, Clone, PartialEq, Eq, Debug)]
++pub enum Status {
++    /// Indicates success.
++    ///
++    /// Means that more input may be needed but isn't available
++    /// and/or there's more output to be written but the output buffer is full.
++    Ok,
++
++    /// Indicates that forward progress is not possible due to input or output
++    /// buffers being empty.
++    ///
++    /// For compression it means the input buffer needs some more data or the
++    /// output buffer needs to be freed up before trying again.
++    ///
++    /// For decompression this means that more input is needed to continue or
++    /// the output buffer isn't large enough to contain the result. The function
++    /// can be called again after fixing both.
++    BufError,
++
++    /// Indicates that all input has been consumed and all output bytes have
++    /// been written. Decompression/compression should not be called again.
++    ///
++    /// For decompression with zlib streams the Adler-32 of the decompressed
++    /// data has also been verified.
++    StreamEnd,
++}
++
++impl Compress {
++    /// Creates a new object ready for compressing data that it's given.
++    ///
++    /// The `level` argument here indicates what level of compression is going
++    /// to be performed, and the `zlib_header` argument indicates whether the
++    /// output data should have a zlib header or not.
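The `Status` values above are normally consumed in a feed-and-drain loop: stop on `StreamEnd`, and treat `Ok`/`BufError` as a request for more room or more input. A sketch, not part of the vendored source, assuming `input` holds one complete raw deflate stream; a truncated stream is detected by the lack of any progress:

```rust
use flate2::{Decompress, FlushDecompress, Status};

fn inflate_all(mut input: &[u8]) -> Result<Vec<u8>, flate2::DecompressError> {
    let mut d = Decompress::new(false);
    let mut out = Vec::with_capacity(1024);
    loop {
        let (in_before, out_before) = (d.total_in(), d.total_out());
        let status = d.decompress_vec(input, &mut out, FlushDecompress::None)?;
        input = &input[(d.total_in() - in_before) as usize..];
        match status {
            Status::StreamEnd => return Ok(out),
            Status::Ok | Status::BufError => {
                if input.is_empty() && d.total_in() == in_before && d.total_out() == out_before {
                    // No bytes consumed or produced: the input was truncated.
                    return Ok(out);
                }
                let len = out.len();
                out.reserve(len.max(1024)); // free up output space, then retry
            }
        }
    }
}
```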
++ pub fn new(level: Compression, zlib_header: bool) -> Compress { ++ unsafe { ++ let mut state = ffi::StreamWrapper::default(); ++ let ret = ffi::mz_deflateInit2( ++ &mut *state, ++ level.0 as c_int, ++ ffi::MZ_DEFLATED, ++ if zlib_header { ++ ffi::MZ_DEFAULT_WINDOW_BITS ++ } else { ++ -ffi::MZ_DEFAULT_WINDOW_BITS ++ }, ++ 9, ++ ffi::MZ_DEFAULT_STRATEGY, ++ ); ++ debug_assert_eq!(ret, 0); ++ Compress { ++ inner: Stream { ++ stream_wrapper: state, ++ total_in: 0, ++ total_out: 0, ++ _marker: marker::PhantomData, ++ }, ++ } ++ } ++ } ++ ++ /// Returns the total number of input bytes which have been processed by ++ /// this compression object. ++ pub fn total_in(&self) -> u64 { ++ self.inner.total_in ++ } ++ ++ /// Returns the total number of output bytes which have been produced by ++ /// this compression object. ++ pub fn total_out(&self) -> u64 { ++ self.inner.total_out ++ } ++ ++ /// Specifies the compression dictionary to use. ++ /// ++ /// Returns the Adler-32 checksum of the dictionary. ++ #[cfg(feature = "zlib")] ++ pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result { ++ let stream = &mut *self.inner.stream_wrapper; ++ let rc = unsafe { ++ ffi::deflateSetDictionary(stream, dictionary.as_ptr(), dictionary.len() as ffi::uInt) ++ }; ++ ++ match rc { ++ ffi::MZ_STREAM_ERROR => Err(CompressError(())), ++ ffi::MZ_OK => Ok(stream.adler as u32), ++ c => panic!("unknown return code: {}", c), ++ } ++ } ++ ++ /// Quickly resets this compressor without having to reallocate anything. ++ /// ++ /// This is equivalent to dropping this object and then creating a new one. ++ pub fn reset(&mut self) { ++ let rc = unsafe { ffi::mz_deflateReset(&mut *self.inner.stream_wrapper) }; ++ assert_eq!(rc, ffi::MZ_OK); ++ ++ self.inner.total_in = 0; ++ self.inner.total_out = 0; ++ } ++ ++ /// Dynamically updates the compression level. ++ /// ++ /// This can be used to switch between compression levels for different ++ /// kinds of data, or it can be used in conjunction with a call to reset ++ /// to reuse the compressor. ++ /// ++ /// This may return an error if there wasn't enough output space to complete ++ /// the compression of the available input data before changing the ++ /// compression level. Flushing the stream before calling this method ++ /// ensures that the function will succeed on the first call. ++ #[cfg(feature = "zlib")] ++ pub fn set_level(&mut self, level: Compression) -> Result<(), CompressError> { ++ let stream = &mut *self.inner.stream_wrapper; ++ ++ let rc = unsafe { ffi::deflateParams(stream, level.0 as c_int, ffi::MZ_DEFAULT_STRATEGY) }; ++ ++ match rc { ++ ffi::MZ_OK => Ok(()), ++ ffi::MZ_BUF_ERROR => Err(CompressError(())), ++ c => panic!("unknown return code: {}", c), ++ } ++ } ++ ++ /// Compresses the input data into the output, consuming only as much ++ /// input as needed and writing as much output as possible. ++ /// ++ /// The flush option can be any of the available `FlushCompress` parameters. ++ /// ++ /// To learn how much data was consumed or how much output was produced, use ++ /// the `total_in` and `total_out` functions before/after this is called. 
++ pub fn compress( ++ &mut self, ++ input: &[u8], ++ output: &mut [u8], ++ flush: FlushCompress, ++ ) -> Result { ++ let raw = &mut *self.inner.stream_wrapper; ++ raw.next_in = input.as_ptr() as *mut _; ++ raw.avail_in = input.len() as c_uint; ++ raw.next_out = output.as_mut_ptr(); ++ raw.avail_out = output.len() as c_uint; ++ ++ let rc = unsafe { ffi::mz_deflate(raw, flush as c_int) }; ++ ++ // Unfortunately the total counters provided by zlib might be only ++ // 32 bits wide and overflow while processing large amounts of data. ++ self.inner.total_in += (raw.next_in as usize - input.as_ptr() as usize) as u64; ++ self.inner.total_out += (raw.next_out as usize - output.as_ptr() as usize) as u64; ++ ++ match rc { ++ ffi::MZ_OK => Ok(Status::Ok), ++ ffi::MZ_BUF_ERROR => Ok(Status::BufError), ++ ffi::MZ_STREAM_END => Ok(Status::StreamEnd), ++ ffi::MZ_STREAM_ERROR => Err(CompressError(())), ++ c => panic!("unknown return code: {}", c), ++ } ++ } ++ ++ /// Compresses the input data into the extra space of the output, consuming ++ /// only as much input as needed and writing as much output as possible. ++ /// ++ /// This function has the same semantics as `compress`, except that the ++ /// length of `vec` is managed by this function. This will not reallocate ++ /// the vector provided or attempt to grow it, so space for the output must ++ /// be reserved in the output vector by the caller before calling this ++ /// function. ++ pub fn compress_vec( ++ &mut self, ++ input: &[u8], ++ output: &mut Vec, ++ flush: FlushCompress, ++ ) -> Result { ++ let cap = output.capacity(); ++ let len = output.len(); ++ ++ unsafe { ++ let before = self.total_out(); ++ let ret = { ++ let ptr = output.as_mut_ptr().offset(len as isize); ++ let out = slice::from_raw_parts_mut(ptr, cap - len); ++ self.compress(input, out, flush) ++ }; ++ output.set_len((self.total_out() - before) as usize + len); ++ return ret; ++ } ++ } ++} ++ ++impl Decompress { ++ /// Creates a new object ready for decompressing data that it's given. ++ /// ++ /// The `zlib_header` argument indicates whether the input data is expected ++ /// to have a zlib header or not. ++ pub fn new(zlib_header: bool) -> Decompress { ++ unsafe { ++ let mut state = ffi::StreamWrapper::default(); ++ let ret = ffi::mz_inflateInit2( ++ &mut *state, ++ if zlib_header { ++ ffi::MZ_DEFAULT_WINDOW_BITS ++ } else { ++ -ffi::MZ_DEFAULT_WINDOW_BITS ++ }, ++ ); ++ debug_assert_eq!(ret, 0); ++ Decompress { ++ inner: Stream { ++ stream_wrapper: state, ++ total_in: 0, ++ total_out: 0, ++ _marker: marker::PhantomData, ++ }, ++ } ++ } ++ } ++ ++ /// Returns the total number of input bytes which have been processed by ++ /// this decompression object. ++ pub fn total_in(&self) -> u64 { ++ self.inner.total_in ++ } ++ ++ /// Returns the total number of output bytes which have been produced by ++ /// this decompression object. ++ pub fn total_out(&self) -> u64 { ++ self.inner.total_out ++ } ++ ++ /// Decompresses the input data into the output, consuming only as much ++ /// input as needed and writing as much output as possible. ++ /// ++ /// The flush option can be any of the available `FlushDecompress` parameters. ++ /// ++ /// If the first call passes `FlushDecompress::Finish` it is assumed that ++ /// the input and output buffers are both sized large enough to decompress ++ /// the entire stream in a single call. 
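Pairing `compress` with `FlushCompress::Finish` as described above drives the whole stream in one call when the output slice is conservatively sized. A sketch, not part of the vendored source; the `+ 128` slack is an assumption, and `total_out` reports how much of the buffer was actually used:

```rust
use flate2::{Compress, Compression, FlushCompress, Status};

/// One-shot raw-deflate compression of a complete input buffer.
fn deflate_one_shot(input: &[u8]) -> Vec<u8> {
    let mut c = Compress::new(Compression::best(), false);
    let mut out = vec![0u8; input.len() + 128];
    let status = c.compress(input, &mut out, FlushCompress::Finish).unwrap();
    assert_eq!(status, Status::StreamEnd); // holds only if `out` was big enough
    out.truncate(c.total_out() as usize);
    out
}
```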
++ /// ++ /// A flush value of `FlushDecompress::Finish` indicates that there are no ++ /// more source bytes available beside what's already in the input buffer, ++ /// and the output buffer is large enough to hold the rest of the ++ /// decompressed data. ++ /// ++ /// To learn how much data was consumed or how much output was produced, use ++ /// the `total_in` and `total_out` functions before/after this is called. ++ /// ++ /// # Errors ++ /// ++ /// If the input data to this instance of `Decompress` is not a valid ++ /// zlib/deflate stream then this function may return an instance of ++ /// `DecompressError` to indicate that the stream of input bytes is corrupted. ++ pub fn decompress( ++ &mut self, ++ input: &[u8], ++ output: &mut [u8], ++ flush: FlushDecompress, ++ ) -> Result { ++ let raw = &mut *self.inner.stream_wrapper; ++ raw.next_in = input.as_ptr() as *mut u8; ++ raw.avail_in = input.len() as c_uint; ++ raw.next_out = output.as_mut_ptr(); ++ raw.avail_out = output.len() as c_uint; ++ ++ let rc = unsafe { ffi::mz_inflate(raw, flush as c_int) }; ++ ++ // Unfortunately the total counters provided by zlib might be only ++ // 32 bits wide and overflow while processing large amounts of data. ++ self.inner.total_in += (raw.next_in as usize - input.as_ptr() as usize) as u64; ++ self.inner.total_out += (raw.next_out as usize - output.as_ptr() as usize) as u64; ++ ++ match rc { ++ ffi::MZ_DATA_ERROR | ffi::MZ_STREAM_ERROR => Err(DecompressError(Default::default())), ++ ffi::MZ_OK => Ok(Status::Ok), ++ ffi::MZ_BUF_ERROR => Ok(Status::BufError), ++ ffi::MZ_STREAM_END => Ok(Status::StreamEnd), ++ ffi::MZ_NEED_DICT => Err(DecompressError(DecompressErrorInner { ++ needs_dictionary: Some(raw.adler as u32), ++ })), ++ c => panic!("unknown return code: {}", c), ++ } ++ } ++ ++ /// Decompresses the input data into the extra space in the output vector ++ /// specified by `output`. ++ /// ++ /// This function has the same semantics as `decompress`, except that the ++ /// length of `vec` is managed by this function. This will not reallocate ++ /// the vector provided or attempt to grow it, so space for the output must ++ /// be reserved in the output vector by the caller before calling this ++ /// function. ++ /// ++ /// # Errors ++ /// ++ /// If the input data to this instance of `Decompress` is not a valid ++ /// zlib/deflate stream then this function may return an instance of ++ /// `DecompressError` to indicate that the stream of input bytes is corrupted. ++ pub fn decompress_vec( ++ &mut self, ++ input: &[u8], ++ output: &mut Vec, ++ flush: FlushDecompress, ++ ) -> Result { ++ let cap = output.capacity(); ++ let len = output.len(); ++ ++ unsafe { ++ let before = self.total_out(); ++ let ret = { ++ let ptr = output.as_mut_ptr().offset(len as isize); ++ let out = slice::from_raw_parts_mut(ptr, cap - len); ++ self.decompress(input, out, flush) ++ }; ++ output.set_len((self.total_out() - before) as usize + len); ++ return ret; ++ } ++ } ++ ++ /// Specifies the decompression dictionary to use. 
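As the `decompress_vec` documentation above stresses, the function writes only into the vector's spare capacity and never reallocates, so the caller must reserve space first. A sketch, not part of the vendored source; `expected` is a caller-supplied size hint and an assumption of the sketch:

```rust
use flate2::{Decompress, FlushDecompress};

/// Inflates one complete raw deflate stream whose output size is known.
fn inflate_with_known_size(input: &[u8], expected: usize) -> Vec<u8> {
    let mut d = Decompress::new(false);
    let mut out = Vec::with_capacity(expected); // decompress_vec will not grow this
    d.decompress_vec(input, &mut out, FlushDecompress::Finish)
        .unwrap();
    out
}
```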
++    #[cfg(feature = "zlib")]
++    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, DecompressError> {
++        let stream = &mut *self.inner.stream_wrapper;
++        let rc = unsafe {
++            ffi::inflateSetDictionary(stream, dictionary.as_ptr(), dictionary.len() as ffi::uInt)
++        };
++
++        match rc {
++            ffi::MZ_STREAM_ERROR => Err(DecompressError(Default::default())),
++            ffi::MZ_DATA_ERROR => Err(DecompressError(DecompressErrorInner {
++                needs_dictionary: Some(stream.adler as u32),
++            })),
++            ffi::MZ_OK => Ok(stream.adler as u32),
++            c => panic!("unknown return code: {}", c),
++        }
++    }
++
++    /// Performs the equivalent of replacing this decompression state with a
++    /// freshly allocated copy.
++    ///
++    /// This function may not allocate memory, though, and attempts to reuse any
++    /// previously existing resources.
++    ///
++    /// The argument provided here indicates whether the reset state will
++    /// attempt to decode a zlib header first or not.
++    pub fn reset(&mut self, zlib_header: bool) {
++        self._reset(zlib_header);
++    }
++
++    #[cfg(feature = "zlib")]
++    fn _reset(&mut self, zlib_header: bool) {
++        let bits = if zlib_header {
++            ffi::MZ_DEFAULT_WINDOW_BITS
++        } else {
++            -ffi::MZ_DEFAULT_WINDOW_BITS
++        };
++        unsafe {
++            ffi::inflateReset2(&mut *self.inner.stream_wrapper, bits);
++        }
++        self.inner.total_out = 0;
++        self.inner.total_in = 0;
++    }
++
++    #[cfg(not(feature = "zlib"))]
++    fn _reset(&mut self, zlib_header: bool) {
++        *self = Decompress::new(zlib_header);
++    }
++}
++
++impl Error for DecompressError {
++    fn description(&self) -> &str {
++        "deflate decompression error"
++    }
++}
++
++impl From<DecompressError> for io::Error {
++    fn from(data: DecompressError) -> io::Error {
++        io::Error::new(io::ErrorKind::Other, data)
++    }
++}
++
++impl fmt::Display for DecompressError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        self.description().fmt(f)
++    }
++}
++
++impl Error for CompressError {
++    fn description(&self) -> &str {
++        "deflate compression error"
++    }
++}
++
++impl From<CompressError> for io::Error {
++    fn from(data: CompressError) -> io::Error {
++        io::Error::new(io::ErrorKind::Other, data)
++    }
++}
++
++impl fmt::Display for CompressError {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        self.description().fmt(f)
++    }
++}
++
++impl Direction for DirCompress {
++    unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int {
++        ffi::mz_deflateEnd(stream)
++    }
++}
++impl Direction for DirDecompress {
++    unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int {
++        ffi::mz_inflateEnd(stream)
++    }
++}
++
++impl<D: Direction> Drop for Stream<D> {
++    fn drop(&mut self) {
++        unsafe {
++            let _ = D::destroy(&mut *self.stream_wrapper);
++        }
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use std::io::Write;
++
++    use write;
++    use {Compression, Decompress, FlushDecompress};
++
++    #[cfg(feature = "zlib")]
++    use {Compress, FlushCompress};
++
++    #[test]
++    fn issue51() {
++        let data = vec![
++            0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xb3, 0xc9, 0x28, 0xc9,
++            0xcd, 0xb1, 0xe3, 0xe5, 0xb2, 0xc9, 0x48, 0x4d, 0x4c, 0xb1, 0xb3, 0x29, 0xc9, 0x2c,
++            0xc9, 0x49, 0xb5, 0x33, 0x31, 0x30, 0x51, 0xf0, 0xcb, 0x2f, 0x51, 0x70, 0xcb, 0x2f,
++            0xcd, 0x4b, 0xb1, 0xd1, 0x87, 0x08, 0xda, 0xe8, 0x83, 0x95, 0x00, 0x95, 0x26, 0xe5,
++            0xa7, 0x54, 0x2a, 0x24, 0xa5, 0x27, 0xe7, 0xe7, 0xe4, 0x17, 0xd9, 0x2a, 0x95, 0x67,
++            0x64, 0x96, 0xa4, 0x2a, 0x81, 0x8c, 0x48, 0x4e, 0xcd, 0x2b, 0x49, 0x2d, 0xb2, 0xb3,
++            0xc9, 0x30, 0x44, 0x37, 0x01, 0x28, 0x62, 0xa3, 0x0f, 0x95, 0x06, 0xd9, 0x05, 0x54,
++            0x04, 0xe5, 0xe5, 0xa5, 0x67, 0xe6, 0x55, 0xe8,
0x1b, 0xea, 0x99, 0xe9, 0x19, 0x21, ++ 0xab, 0xd0, 0x07, 0xd9, 0x01, 0x32, 0x53, 0x1f, 0xea, 0x3e, 0x00, 0x94, 0x85, 0xeb, ++ 0xe4, 0xa8, 0x00, 0x00, 0x00, ++ ]; ++ ++ let mut decoded = Vec::with_capacity(data.len() * 2); ++ ++ let mut d = Decompress::new(false); ++ // decompressed whole deflate stream ++ assert!( ++ d.decompress_vec(&data[10..], &mut decoded, FlushDecompress::Finish) ++ .is_ok() ++ ); ++ ++ // decompress data that has nothing to do with the deflate stream (this ++ // used to panic) ++ drop(d.decompress_vec(&[0], &mut decoded, FlushDecompress::None)); ++ } ++ ++ #[test] ++ fn reset() { ++ let string = "hello world".as_bytes(); ++ let mut zlib = Vec::new(); ++ let mut deflate = Vec::new(); ++ ++ let comp = Compression::default(); ++ write::ZlibEncoder::new(&mut zlib, comp) ++ .write_all(string) ++ .unwrap(); ++ write::DeflateEncoder::new(&mut deflate, comp) ++ .write_all(string) ++ .unwrap(); ++ ++ let mut dst = [0; 1024]; ++ let mut decoder = Decompress::new(true); ++ decoder ++ .decompress(&zlib, &mut dst, FlushDecompress::Finish) ++ .unwrap(); ++ assert_eq!(decoder.total_out(), string.len() as u64); ++ assert!(dst.starts_with(string)); ++ ++ decoder.reset(false); ++ decoder ++ .decompress(&deflate, &mut dst, FlushDecompress::Finish) ++ .unwrap(); ++ assert_eq!(decoder.total_out(), string.len() as u64); ++ assert!(dst.starts_with(string)); ++ } ++ ++ #[cfg(feature = "zlib")] ++ #[test] ++ fn set_dictionary_with_zlib_header() { ++ let string = "hello, hello!".as_bytes(); ++ let dictionary = "hello".as_bytes(); ++ ++ let mut encoded = Vec::with_capacity(1024); ++ ++ let mut encoder = Compress::new(Compression::default(), true); ++ ++ let dictionary_adler = encoder.set_dictionary(&dictionary).unwrap(); ++ ++ encoder ++ .compress_vec(string, &mut encoded, FlushCompress::Finish) ++ .unwrap(); ++ ++ assert_eq!(encoder.total_in(), string.len() as u64); ++ assert_eq!(encoder.total_out(), encoded.len() as u64); ++ ++ let mut decoder = Decompress::new(true); ++ let mut decoded = [0; 1024]; ++ let decompress_error = decoder ++ .decompress(&encoded, &mut decoded, FlushDecompress::Finish) ++ .expect_err("decompression should fail due to requiring a dictionary"); ++ ++ let required_adler = decompress_error.needs_dictionary() ++ .expect("the first call to decompress should indicate a dictionary is required along with the required Adler-32 checksum"); ++ ++ assert_eq!(required_adler, dictionary_adler, ++ "the Adler-32 checksum should match the value when the dictionary was set on the compressor"); ++ ++ let actual_adler = decoder.set_dictionary(&dictionary).unwrap(); ++ ++ assert_eq!(required_adler, actual_adler); ++ ++ // Decompress the rest of the input to the remainder of the output buffer ++ let total_in = decoder.total_in(); ++ let total_out = decoder.total_out(); ++ ++ let decompress_result = decoder.decompress( ++ &encoded[total_in as usize..], ++ &mut decoded[total_out as usize..], ++ FlushDecompress::Finish, ++ ); ++ assert!(decompress_result.is_ok()); ++ ++ assert_eq!(&decoded[..decoder.total_out() as usize], string); ++ } ++ ++ #[cfg(feature = "zlib")] ++ #[test] ++ fn set_dictionary_raw() { ++ let string = "hello, hello!".as_bytes(); ++ let dictionary = "hello".as_bytes(); ++ ++ let mut encoded = Vec::with_capacity(1024); ++ ++ let mut encoder = Compress::new(Compression::default(), false); ++ ++ encoder.set_dictionary(&dictionary).unwrap(); ++ ++ encoder ++ .compress_vec(string, &mut encoded, FlushCompress::Finish) ++ .unwrap(); ++ ++ assert_eq!(encoder.total_in(), 
string.len() as u64);
++        assert_eq!(encoder.total_out(), encoded.len() as u64);
++
++        let mut decoder = Decompress::new(false);
++
++        decoder.set_dictionary(&dictionary).unwrap();
++
++        let mut decoded = [0; 1024];
++        let decompress_result = decoder.decompress(&encoded, &mut decoded, FlushDecompress::Finish);
++
++        assert!(decompress_result.is_ok());
++
++        assert_eq!(&decoded[..decoder.total_out() as usize], string);
++    }
++
++}
diff --cc vendor/flate2-1.0.2/src/zio.rs
index 000000000,000000000..1222a6c3e
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/src/zio.rs
@@@ -1,0 -1,0 +1,290 @@@
++use std::io;
++use std::io::prelude::*;
++use std::mem;
++
++use {Compress, Decompress, DecompressError, FlushCompress, FlushDecompress, Status};
++
++#[derive(Debug)]
++pub struct Writer<W: Write, D: Ops> {
++    obj: Option<W>,
++    pub data: D,
++    buf: Vec<u8>,
++}
++
++pub trait Ops {
++    type Flush: Flush;
++    fn total_in(&self) -> u64;
++    fn total_out(&self) -> u64;
++    fn run(
++        &mut self,
++        input: &[u8],
++        output: &mut [u8],
++        flush: Self::Flush,
++    ) -> Result<Status, DecompressError>;
++    fn run_vec(
++        &mut self,
++        input: &[u8],
++        output: &mut Vec<u8>,
++        flush: Self::Flush,
++    ) -> Result<Status, DecompressError>;
++}
++
++impl Ops for Compress {
++    type Flush = FlushCompress;
++    fn total_in(&self) -> u64 {
++        self.total_in()
++    }
++    fn total_out(&self) -> u64 {
++        self.total_out()
++    }
++    fn run(
++        &mut self,
++        input: &[u8],
++        output: &mut [u8],
++        flush: FlushCompress,
++    ) -> Result<Status, DecompressError> {
++        Ok(self.compress(input, output, flush).unwrap())
++    }
++    fn run_vec(
++        &mut self,
++        input: &[u8],
++        output: &mut Vec<u8>,
++        flush: FlushCompress,
++    ) -> Result<Status, DecompressError> {
++        Ok(self.compress_vec(input, output, flush).unwrap())
++    }
++}
++
++impl Ops for Decompress {
++    type Flush = FlushDecompress;
++    fn total_in(&self) -> u64 {
++        self.total_in()
++    }
++    fn total_out(&self) -> u64 {
++        self.total_out()
++    }
++    fn run(
++        &mut self,
++        input: &[u8],
++        output: &mut [u8],
++        flush: FlushDecompress,
++    ) -> Result<Status, DecompressError> {
++        self.decompress(input, output, flush)
++    }
++    fn run_vec(
++        &mut self,
++        input: &[u8],
++        output: &mut Vec<u8>,
++        flush: FlushDecompress,
++    ) -> Result<Status, DecompressError> {
++        self.decompress_vec(input, output, flush)
++    }
++}
++
++pub trait Flush {
++    fn none() -> Self;
++    fn sync() -> Self;
++    fn finish() -> Self;
++}
++
++impl Flush for FlushCompress {
++    fn none() -> Self {
++        FlushCompress::None
++    }
++
++    fn sync() -> Self {
++        FlushCompress::Sync
++    }
++
++    fn finish() -> Self {
++        FlushCompress::Finish
++    }
++}
++
++impl Flush for FlushDecompress {
++    fn none() -> Self {
++        FlushDecompress::None
++    }
++
++    fn sync() -> Self {
++        FlushDecompress::Sync
++    }
++
++    fn finish() -> Self {
++        FlushDecompress::Finish
++    }
++}
++
++pub fn read<R, D>(obj: &mut R, data: &mut D, dst: &mut [u8]) -> io::Result<usize>
++where
++    R: BufRead,
++    D: Ops,
++{
++    loop {
++        let (read, consumed, ret, eof);
++        {
++            let input = obj.fill_buf()?;
++            eof = input.is_empty();
++            let before_out = data.total_out();
++            let before_in = data.total_in();
++            let flush = if eof {
++                D::Flush::finish()
++            } else {
++                D::Flush::none()
++            };
++            ret = data.run(input, dst, flush);
++            read = (data.total_out() - before_out) as usize;
++            consumed = (data.total_in() - before_in) as usize;
++        }
++        obj.consume(consumed);
++
++        match ret {
++            // If we haven't read any data and we haven't hit EOF yet,
++            // then we need to keep asking for more data because if we
++            // return that 0 bytes of data have been read then it will
++            // be interpreted as EOF.
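++            // (Returning `Ok(0)` from `Read::read` is the standard EOF
++            // signal, so looping here is what distinguishes "no output
++            // produced yet" from "stream finished".)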
++ Ok(Status::Ok) | Ok(Status::BufError) if read == 0 && !eof && dst.len() > 0 => continue, ++ Ok(Status::Ok) | Ok(Status::BufError) | Ok(Status::StreamEnd) => return Ok(read), ++ ++ Err(..) => { ++ return Err(io::Error::new( ++ io::ErrorKind::InvalidInput, ++ "corrupt deflate stream", ++ )) ++ } ++ } ++ } ++} ++ ++impl Writer { ++ pub fn new(w: W, d: D) -> Writer { ++ Writer { ++ obj: Some(w), ++ data: d, ++ buf: Vec::with_capacity(32 * 1024), ++ } ++ } ++ ++ pub fn finish(&mut self) -> io::Result<()> { ++ loop { ++ self.dump()?; ++ ++ let before = self.data.total_out(); ++ self.data.run_vec(&[], &mut self.buf, D::Flush::finish())?; ++ if before == self.data.total_out() { ++ return Ok(()); ++ } ++ } ++ } ++ ++ pub fn replace(&mut self, w: W) -> W { ++ self.buf.truncate(0); ++ mem::replace(self.get_mut(), w) ++ } ++ ++ pub fn get_ref(&self) -> &W { ++ self.obj.as_ref().unwrap() ++ } ++ ++ pub fn get_mut(&mut self) -> &mut W { ++ self.obj.as_mut().unwrap() ++ } ++ ++ // Note that this should only be called if the outer object is just about ++ // to be consumed! ++ // ++ // (e.g. an implementation of `into_inner`) ++ pub fn take_inner(&mut self) -> W { ++ self.obj.take().unwrap() ++ } ++ ++ pub fn is_present(&self) -> bool { ++ self.obj.is_some() ++ } ++ ++ // Returns total written bytes and status of underlying codec ++ pub(crate) fn write_with_status(&mut self, buf: &[u8]) -> io::Result<(usize, Status)> { ++ // miniz isn't guaranteed to actually write any of the buffer provided, ++ // it may be in a flushing mode where it's just giving us data before ++ // we're actually giving it any data. We don't want to spuriously return ++ // `Ok(0)` when possible as it will cause calls to write_all() to fail. ++ // As a result we execute this in a loop to ensure that we try our ++ // darndest to write the data. ++ loop { ++ self.dump()?; ++ ++ let before_in = self.data.total_in(); ++ let ret = self.data.run_vec(buf, &mut self.buf, D::Flush::none()); ++ let written = (self.data.total_in() - before_in) as usize; ++ ++ let is_stream_end = match ret { ++ Ok(Status::StreamEnd) => true, ++ _ => false, ++ }; ++ ++ if buf.len() > 0 && written == 0 && ret.is_ok() && !is_stream_end { ++ continue; ++ } ++ return match ret { ++ Ok(st) => match st { ++ Status::Ok | Status::BufError | Status::StreamEnd => Ok((written, st)), ++ }, ++ Err(..) => Err(io::Error::new( ++ io::ErrorKind::InvalidInput, ++ "corrupt deflate stream", ++ )), ++ }; ++ } ++ } ++ ++ fn dump(&mut self) -> io::Result<()> { ++ // TODO: should manage this buffer not with `drain` but probably more of ++ // a deque-like strategy. ++ while self.buf.len() > 0 { ++ let n = try!(self.obj.as_mut().unwrap().write(&self.buf)); ++ if n == 0 { ++ return Err(io::ErrorKind::WriteZero.into()); ++ } ++ self.buf.drain(..n); ++ } ++ Ok(()) ++ } ++} ++ ++impl Write for Writer { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.write_with_status(buf).map(|res| res.0) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.data ++ .run_vec(&[], &mut self.buf, D::Flush::sync()) ++ .unwrap(); ++ ++ // Unfortunately miniz doesn't actually tell us when we're done with ++ // pulling out all the data from the internal stream. To remedy this we ++ // have to continually ask the stream for more memory until it doesn't ++ // give us a chunk of memory the same size as our own internal buffer, ++ // at which point we assume it's reached the end. 
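++        // (Sketch of the termination condition: each pass drains `buf` into
++        // the inner writer, then asks the codec for more output with
++        // `Flush::none()`; once `total_out` stops advancing, this sync point
++        // has been fully emitted.)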
++ loop { ++ self.dump()?; ++ let before = self.data.total_out(); ++ self.data ++ .run_vec(&[], &mut self.buf, D::Flush::none()) ++ .unwrap(); ++ if before == self.data.total_out() { ++ break; ++ } ++ } ++ ++ self.obj.as_mut().unwrap().flush() ++ } ++} ++ ++impl Drop for Writer { ++ fn drop(&mut self) { ++ if self.obj.is_some() { ++ let _ = self.finish(); ++ } ++ } ++} diff --cc vendor/flate2-1.0.2/src/zlib/bufread.rs index 000000000,000000000..9556e2506 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/zlib/bufread.rs @@@ -1,0 -1,0 +1,258 @@@ ++use std::io::prelude::*; ++use std::io; ++use std::mem; ++ ++#[cfg(feature = "tokio")] ++use futures::Poll; ++#[cfg(feature = "tokio")] ++use tokio_io::{AsyncRead, AsyncWrite}; ++ ++use zio; ++use {Compress, Decompress}; ++ ++/// A ZLIB encoder, or compressor. ++/// ++/// This structure implements a [`BufRead`] interface and will read uncompressed ++/// data from an underlying stream and emit a stream of compressed data. ++/// ++/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use flate2::Compression; ++/// use flate2::bufread::ZlibEncoder; ++/// use std::fs::File; ++/// use std::io::BufReader; ++/// ++/// // Use a buffered file to compress contents into a Vec ++/// ++/// # fn open_hello_world() -> std::io::Result> { ++/// let f = File::open("examples/hello_world.txt")?; ++/// let b = BufReader::new(f); ++/// let mut z = ZlibEncoder::new(b, Compression::fast()); ++/// let mut buffer = Vec::new(); ++/// z.read_to_end(&mut buffer)?; ++/// # Ok(buffer) ++/// # } ++/// ``` ++#[derive(Debug)] ++pub struct ZlibEncoder { ++ obj: R, ++ data: Compress, ++} ++ ++impl ZlibEncoder { ++ /// Creates a new encoder which will read uncompressed data from the given ++ /// stream and emit the compressed stream. ++ pub fn new(r: R, level: ::Compression) -> ZlibEncoder { ++ ZlibEncoder { ++ obj: r, ++ data: Compress::new(level, true), ++ } ++ } ++} ++ ++pub fn reset_encoder_data(zlib: &mut ZlibEncoder) { ++ zlib.data.reset() ++} ++ ++impl ZlibEncoder { ++ /// Resets the state of this encoder entirely, swapping out the input ++ /// stream for another. ++ /// ++ /// This function will reset the internal state of this encoder and replace ++ /// the input stream with the one provided, returning the previous input ++ /// stream. Future data read from this encoder will be the compressed ++ /// version of `r`'s data. ++ pub fn reset(&mut self, r: R) -> R { ++ reset_encoder_data(self); ++ mem::replace(&mut self.obj, r) ++ } ++ ++ /// Acquires a reference to the underlying reader ++ pub fn get_ref(&self) -> &R { ++ &self.obj ++ } ++ ++ /// Acquires a mutable reference to the underlying stream ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ &mut self.obj ++ } ++ ++ /// Consumes this encoder, returning the underlying reader. ++ pub fn into_inner(self) -> R { ++ self.obj ++ } ++ ++ /// Returns the number of bytes that have been read into this compressor. ++ /// ++ /// Note that not all bytes read from the underlying object may be accounted ++ /// for, there may still be some active buffering. ++ pub fn total_in(&self) -> u64 { ++ self.data.total_in() ++ } ++ ++ /// Returns the number of bytes that the compressor has produced. ++ /// ++ /// Note that not all bytes may have been read yet, some may still be ++ /// buffered. 
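++    ///
++    /// As an illustrative use, `total_out() as f64 / total_in() as f64`
++    /// gives a rough running compression ratio once some input has been
++    /// pulled through the encoder.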
++ pub fn total_out(&self) -> u64 { ++ self.data.total_out() ++ } ++} ++ ++impl Read for ZlibEncoder { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ zio::read(&mut self.obj, &mut self.data, buf) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for ZlibEncoder {} ++ ++impl Write for ZlibEncoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for ZlibEncoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ self.get_mut().shutdown() ++ } ++} ++ ++/// A ZLIB decoder, or decompressor. ++/// ++/// This structure implements a [`BufRead`] interface and takes a stream of ++/// compressed data as input, providing the decompressed data when read from. ++/// ++/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// # use flate2::Compression; ++/// # use flate2::write::ZlibEncoder; ++/// use flate2::bufread::ZlibDecoder; ++/// ++/// # fn main() { ++/// # let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); ++/// # e.write_all(b"Hello World").unwrap(); ++/// # let bytes = e.finish().unwrap(); ++/// # println!("{}", decode_bufreader(bytes).unwrap()); ++/// # } ++/// # ++/// // Uncompresses a Zlib Encoded vector of bytes and returns a string or error ++/// // Here &[u8] implements BufRead ++/// ++/// fn decode_bufreader(bytes: Vec) -> io::Result { ++/// let mut z = ZlibDecoder::new(&bytes[..]); ++/// let mut s = String::new(); ++/// z.read_to_string(&mut s)?; ++/// Ok(s) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct ZlibDecoder { ++ obj: R, ++ data: Decompress, ++} ++ ++impl ZlibDecoder { ++ /// Creates a new decoder which will decompress data read from the given ++ /// stream. ++ pub fn new(r: R) -> ZlibDecoder { ++ ZlibDecoder { ++ obj: r, ++ data: Decompress::new(true), ++ } ++ } ++} ++ ++pub fn reset_decoder_data(zlib: &mut ZlibDecoder) { ++ zlib.data = Decompress::new(true); ++} ++ ++impl ZlibDecoder { ++ /// Resets the state of this decoder entirely, swapping out the input ++ /// stream for another. ++ /// ++ /// This will reset the internal state of this decoder and replace the ++ /// input stream with the one provided, returning the previous input ++ /// stream. Future data read from this decoder will be the decompressed ++ /// version of `r`'s data. ++ pub fn reset(&mut self, r: R) -> R { ++ reset_decoder_data(self); ++ mem::replace(&mut self.obj, r) ++ } ++ ++ /// Acquires a reference to the underlying stream ++ pub fn get_ref(&self) -> &R { ++ &self.obj ++ } ++ ++ /// Acquires a mutable reference to the underlying stream ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ &mut self.obj ++ } ++ ++ /// Consumes this decoder, returning the underlying reader. ++ pub fn into_inner(self) -> R { ++ self.obj ++ } ++ ++ /// Returns the number of bytes that the decompressor has consumed. ++ /// ++ /// Note that this will likely be smaller than what the decompressor ++ /// actually read from the underlying stream due to buffering. ++ pub fn total_in(&self) -> u64 { ++ self.data.total_in() ++ } ++ ++ /// Returns the number of bytes that the decompressor has produced. 
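++    ///
++    /// (The decoder expects a zlib header on its input, since it is built
++    /// around `Decompress::new(true)`, as the constructor below shows.)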
++ pub fn total_out(&self) -> u64 { ++ self.data.total_out() ++ } ++} ++ ++impl Read for ZlibDecoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ zio::read(&mut self.obj, &mut self.data, into) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for ZlibDecoder {} ++ ++impl Write for ZlibDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for ZlibDecoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ self.get_mut().shutdown() ++ } ++} diff --cc vendor/flate2-1.0.2/src/zlib/mod.rs index 000000000,000000000..c729df1c2 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/zlib/mod.rs @@@ -1,0 -1,0 +1,159 @@@ ++pub mod bufread; ++pub mod read; ++pub mod write; ++ ++#[cfg(test)] ++mod tests { ++ use std::io::prelude::*; ++ use std::io; ++ ++ use rand::{thread_rng, Rng}; ++ ++ use zlib::{read, write}; ++ use Compression; ++ ++ #[test] ++ fn roundtrip() { ++ let mut real = Vec::new(); ++ let mut w = write::ZlibEncoder::new(Vec::new(), Compression::default()); ++ let v = ::random_bytes().take(1024).collect::>(); ++ for _ in 0..200 { ++ let to_write = &v[..thread_rng().gen_range(0, v.len())]; ++ real.extend(to_write.iter().map(|x| *x)); ++ w.write_all(to_write).unwrap(); ++ } ++ let result = w.finish().unwrap(); ++ let mut r = read::ZlibDecoder::new(&result[..]); ++ let mut ret = Vec::new(); ++ r.read_to_end(&mut ret).unwrap(); ++ assert!(ret == real); ++ } ++ ++ #[test] ++ fn drop_writes() { ++ let mut data = Vec::new(); ++ write::ZlibEncoder::new(&mut data, Compression::default()) ++ .write_all(b"foo") ++ .unwrap(); ++ let mut r = read::ZlibDecoder::new(&data[..]); ++ let mut ret = Vec::new(); ++ r.read_to_end(&mut ret).unwrap(); ++ assert!(ret == b"foo"); ++ } ++ ++ #[test] ++ fn total_in() { ++ let mut real = Vec::new(); ++ let mut w = write::ZlibEncoder::new(Vec::new(), Compression::default()); ++ let v = ::random_bytes().take(1024).collect::>(); ++ for _ in 0..200 { ++ let to_write = &v[..thread_rng().gen_range(0, v.len())]; ++ real.extend(to_write.iter().map(|x| *x)); ++ w.write_all(to_write).unwrap(); ++ } ++ let mut result = w.finish().unwrap(); ++ ++ let result_len = result.len(); ++ ++ for _ in 0..200 { ++ result.extend(v.iter().map(|x| *x)); ++ } ++ ++ let mut r = read::ZlibDecoder::new(&result[..]); ++ let mut ret = Vec::new(); ++ r.read_to_end(&mut ret).unwrap(); ++ assert!(ret == real); ++ assert_eq!(r.total_in(), result_len as u64); ++ } ++ ++ #[test] ++ fn roundtrip2() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let mut r = read::ZlibDecoder::new(read::ZlibEncoder::new(&v[..], Compression::default())); ++ let mut ret = Vec::new(); ++ r.read_to_end(&mut ret).unwrap(); ++ assert_eq!(ret, v); ++ } ++ ++ #[test] ++ fn roundtrip3() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let mut w = ++ write::ZlibEncoder::new(write::ZlibDecoder::new(Vec::new()), Compression::default()); ++ w.write_all(&v).unwrap(); ++ let w = w.finish().unwrap().finish().unwrap(); ++ assert!(w == v); ++ } ++ ++ #[test] ++ fn reset_decoder() { ++ let v = ::random_bytes().take(1024 * 1024).collect::>(); ++ let mut w = write::ZlibEncoder::new(Vec::new(), Compression::default()); ++ w.write_all(&v).unwrap(); ++ let data = w.finish().unwrap(); ++ ++ { ++ let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); ++ let mut r = read::ZlibDecoder::new(&data[..]); ++ 
r.read_to_end(&mut a).unwrap(); ++ r.reset(&data); ++ r.read_to_end(&mut b).unwrap(); ++ ++ let mut r = read::ZlibDecoder::new(&data[..]); ++ r.read_to_end(&mut c).unwrap(); ++ assert!(a == b && b == c && c == v); ++ } ++ ++ { ++ let mut w = write::ZlibDecoder::new(Vec::new()); ++ w.write_all(&data).unwrap(); ++ let a = w.reset(Vec::new()).unwrap(); ++ w.write_all(&data).unwrap(); ++ let b = w.finish().unwrap(); ++ ++ let mut w = write::ZlibDecoder::new(Vec::new()); ++ w.write_all(&data).unwrap(); ++ let c = w.finish().unwrap(); ++ assert!(a == b && b == c && c == v); ++ } ++ } ++ ++ #[test] ++ fn bad_input() { ++ // regress tests: previously caused a panic on drop ++ let mut out: Vec = Vec::new(); ++ let data: Vec = (0..255).cycle().take(1024).collect(); ++ let mut w = write::ZlibDecoder::new(&mut out); ++ match w.write_all(&data[..]) { ++ Ok(_) => panic!("Expected an error to be returned!"), ++ Err(e) => assert_eq!(e.kind(), io::ErrorKind::InvalidInput), ++ } ++ } ++ ++ #[test] ++ fn qc_reader() { ++ ::quickcheck::quickcheck(test as fn(_) -> _); ++ ++ fn test(v: Vec) -> bool { ++ let mut r = ++ read::ZlibDecoder::new(read::ZlibEncoder::new(&v[..], Compression::default())); ++ let mut v2 = Vec::new(); ++ r.read_to_end(&mut v2).unwrap(); ++ v == v2 ++ } ++ } ++ ++ #[test] ++ fn qc_writer() { ++ ::quickcheck::quickcheck(test as fn(_) -> _); ++ ++ fn test(v: Vec) -> bool { ++ let mut w = write::ZlibEncoder::new( ++ write::ZlibDecoder::new(Vec::new()), ++ Compression::default(), ++ ); ++ w.write_all(&v).unwrap(); ++ v == w.finish().unwrap().finish().unwrap() ++ } ++ } ++} diff --cc vendor/flate2-1.0.2/src/zlib/read.rs index 000000000,000000000..4b6bec8dd new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/zlib/read.rs @@@ -1,0 -1,0 +1,265 @@@ ++use std::io::prelude::*; ++use std::io; ++ ++#[cfg(feature = "tokio")] ++use futures::Poll; ++#[cfg(feature = "tokio")] ++use tokio_io::{AsyncRead, AsyncWrite}; ++ ++use bufreader::BufReader; ++use super::bufread; ++ ++/// A ZLIB encoder, or compressor. ++/// ++/// This structure implements a [`Read`] interface and will read uncompressed ++/// data from an underlying stream and emit a stream of compressed data. ++/// ++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use flate2::Compression; ++/// use flate2::read::ZlibEncoder; ++/// use std::fs::File; ++/// ++/// // Open example file and compress the contents using Read interface ++/// ++/// # fn open_hello_world() -> std::io::Result> { ++/// let f = File::open("examples/hello_world.txt")?; ++/// let mut z = ZlibEncoder::new(f, Compression::fast()); ++/// let mut buffer = [0;50]; ++/// let byte_count = z.read(&mut buffer)?; ++/// # Ok(buffer[0..byte_count].to_vec()) ++/// # } ++/// ``` ++#[derive(Debug)] ++pub struct ZlibEncoder { ++ inner: bufread::ZlibEncoder>, ++} ++ ++impl ZlibEncoder { ++ /// Creates a new encoder which will read uncompressed data from the given ++ /// stream and emit the compressed stream. ++ pub fn new(r: R, level: ::Compression) -> ZlibEncoder { ++ ZlibEncoder { ++ inner: bufread::ZlibEncoder::new(BufReader::new(r), level), ++ } ++ } ++} ++ ++impl ZlibEncoder { ++ /// Resets the state of this encoder entirely, swapping out the input ++ /// stream for another. ++ /// ++ /// This function will reset the internal state of this encoder and replace ++ /// the input stream with the one provided, returning the previous input ++ /// stream. 
Future data read from this encoder will be the compressed ++ /// version of `r`'s data. ++ /// ++ /// Note that there may be currently buffered data when this function is ++ /// called, and in that case the buffered data is discarded. ++ pub fn reset(&mut self, r: R) -> R { ++ super::bufread::reset_encoder_data(&mut self.inner); ++ self.inner.get_mut().reset(r) ++ } ++ ++ /// Acquires a reference to the underlying stream ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying stream ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Consumes this encoder, returning the underlying reader. ++ /// ++ /// Note that there may be buffered bytes which are not re-acquired as part ++ /// of this transition. It's recommended to only call this function after ++ /// EOF has been reached. ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++ ++ /// Returns the number of bytes that have been read into this compressor. ++ /// ++ /// Note that not all bytes read from the underlying object may be accounted ++ /// for, there may still be some active buffering. ++ pub fn total_in(&self) -> u64 { ++ self.inner.total_in() ++ } ++ ++ /// Returns the number of bytes that the compressor has produced. ++ /// ++ /// Note that not all bytes may have been read yet, some may still be ++ /// buffered. ++ pub fn total_out(&self) -> u64 { ++ self.inner.total_out() ++ } ++} ++ ++impl Read for ZlibEncoder { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ self.inner.read(buf) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for ZlibEncoder {} ++ ++impl Write for ZlibEncoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for ZlibEncoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ self.get_mut().shutdown() ++ } ++} ++ ++/// A ZLIB decoder, or decompressor. ++/// ++/// This structure implements a [`Read`] interface and takes a stream of ++/// compressed data as input, providing the decompressed data when read from. ++/// ++/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use std::io; ++/// # use flate2::Compression; ++/// # use flate2::write::ZlibEncoder; ++/// use flate2::read::ZlibDecoder; ++/// ++/// # fn main() { ++/// # let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); ++/// # e.write_all(b"Hello World").unwrap(); ++/// # let bytes = e.finish().unwrap(); ++/// # println!("{}", decode_reader(bytes).unwrap()); ++/// # } ++/// # ++/// // Uncompresses a Zlib Encoded vector of bytes and returns a string or error ++/// // Here &[u8] implements Read ++/// ++/// fn decode_reader(bytes: Vec) -> io::Result { ++/// let mut z = ZlibDecoder::new(&bytes[..]); ++/// let mut s = String::new(); ++/// z.read_to_string(&mut s)?; ++/// Ok(s) ++/// } ++/// ``` ++#[derive(Debug)] ++pub struct ZlibDecoder { ++ inner: bufread::ZlibDecoder>, ++} ++ ++impl ZlibDecoder { ++ /// Creates a new decoder which will decompress data read from the given ++ /// stream. 
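++    ///
++    /// (Equivalent to `new_with_buf` below with a fresh 32 KiB intermediate
++    /// buffer.)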
++ pub fn new(r: R) -> ZlibDecoder { ++ ZlibDecoder::new_with_buf(r, vec![0; 32 * 1024]) ++ } ++ ++ /// Same as `new`, but the intermediate buffer for data is specified. ++ /// ++ /// Note that the specified buffer will only be used up to its current ++ /// length. The buffer's capacity will also not grow over time. ++ pub fn new_with_buf(r: R, buf: Vec) -> ZlibDecoder { ++ ZlibDecoder { ++ inner: bufread::ZlibDecoder::new(BufReader::with_buf(buf, r)), ++ } ++ } ++} ++ ++impl ZlibDecoder { ++ /// Resets the state of this decoder entirely, swapping out the input ++ /// stream for another. ++ /// ++ /// This will reset the internal state of this decoder and replace the ++ /// input stream with the one provided, returning the previous input ++ /// stream. Future data read from this decoder will be the decompressed ++ /// version of `r`'s data. ++ /// ++ /// Note that there may be currently buffered data when this function is ++ /// called, and in that case the buffered data is discarded. ++ pub fn reset(&mut self, r: R) -> R { ++ super::bufread::reset_decoder_data(&mut self.inner); ++ self.inner.get_mut().reset(r) ++ } ++ ++ /// Acquires a reference to the underlying stream ++ pub fn get_ref(&self) -> &R { ++ self.inner.get_ref().get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying stream ++ /// ++ /// Note that mutation of the stream may result in surprising results if ++ /// this encoder is continued to be used. ++ pub fn get_mut(&mut self) -> &mut R { ++ self.inner.get_mut().get_mut() ++ } ++ ++ /// Consumes this decoder, returning the underlying reader. ++ /// ++ /// Note that there may be buffered bytes which are not re-acquired as part ++ /// of this transition. It's recommended to only call this function after ++ /// EOF has been reached. ++ pub fn into_inner(self) -> R { ++ self.inner.into_inner().into_inner() ++ } ++ ++ /// Returns the number of bytes that the decompressor has consumed. ++ /// ++ /// Note that this will likely be smaller than what the decompressor ++ /// actually read from the underlying stream due to buffering. ++ pub fn total_in(&self) -> u64 { ++ self.inner.total_in() ++ } ++ ++ /// Returns the number of bytes that the decompressor has produced. ++ pub fn total_out(&self) -> u64 { ++ self.inner.total_out() ++ } ++} ++ ++impl Read for ZlibDecoder { ++ fn read(&mut self, into: &mut [u8]) -> io::Result { ++ self.inner.read(into) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for ZlibDecoder {} ++ ++impl Write for ZlibDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.get_mut().write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.get_mut().flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for ZlibDecoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ self.get_mut().shutdown() ++ } ++} diff --cc vendor/flate2-1.0.2/src/zlib/write.rs index 000000000,000000000..1ea9887b5 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/src/zlib/write.rs @@@ -1,0 -1,0 +1,348 @@@ ++use std::io::prelude::*; ++use std::io; ++ ++#[cfg(feature = "tokio")] ++use futures::Poll; ++#[cfg(feature = "tokio")] ++use tokio_io::{AsyncRead, AsyncWrite}; ++ ++use zio; ++use {Compress, Decompress}; ++ ++/// A ZLIB encoder, or compressor. ++/// ++/// This structure implements a [`Write`] interface and takes a stream of ++/// uncompressed data, writing the compressed data to the wrapped writer. 
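++/// Data written to the encoder is buffered and compressed incrementally
++/// through an internal `zio::Writer`, so the final bytes of the stream are
++/// only emitted once the encoder is finished or dropped.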
++/// ++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html ++/// ++/// # Examples ++/// ++/// ``` ++/// use std::io::prelude::*; ++/// use flate2::Compression; ++/// use flate2::write::ZlibEncoder; ++/// ++/// // Vec implements Write, assigning the compressed bytes of sample string ++/// ++/// # fn zlib_encoding() -> std::io::Result<()> { ++/// let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); ++/// e.write_all(b"Hello World")?; ++/// let compressed = e.finish()?; ++/// # Ok(()) ++/// # } ++/// ``` ++#[derive(Debug)] ++pub struct ZlibEncoder { ++ inner: zio::Writer, ++} ++ ++impl ZlibEncoder { ++ /// Creates a new encoder which will write compressed data to the stream ++ /// given at the given compression level. ++ /// ++ /// When this encoder is dropped or unwrapped the final pieces of data will ++ /// be flushed. ++ pub fn new(w: W, level: ::Compression) -> ZlibEncoder { ++ ZlibEncoder { ++ inner: zio::Writer::new(w, Compress::new(level, true)), ++ } ++ } ++ ++ /// Acquires a reference to the underlying writer. ++ pub fn get_ref(&self) -> &W { ++ self.inner.get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying writer. ++ /// ++ /// Note that mutating the output/input state of the stream may corrupt this ++ /// object, so care must be taken when using this method. ++ pub fn get_mut(&mut self) -> &mut W { ++ self.inner.get_mut() ++ } ++ ++ /// Resets the state of this encoder entirely, swapping out the output ++ /// stream for another. ++ /// ++ /// This function will finish encoding the current stream into the current ++ /// output stream before swapping out the two output streams. ++ /// ++ /// After the current stream has been finished, this will reset the internal ++ /// state of this encoder and replace the output stream with the one ++ /// provided, returning the previous output stream. Future data written to ++ /// this encoder will be the compressed into the stream `w` provided. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to complete this stream, and any I/O ++ /// errors which occur will be returned from this function. ++ pub fn reset(&mut self, w: W) -> io::Result { ++ self.inner.finish()?; ++ self.inner.data.reset(); ++ Ok(self.inner.replace(w)) ++ } ++ ++ /// Attempt to finish this output stream, writing out final chunks of data. ++ /// ++ /// Note that this function can only be used once data has finished being ++ /// written to the output stream. After this function is called then further ++ /// calls to `write` may result in a panic. ++ /// ++ /// # Panics ++ /// ++ /// Attempts to write data to this stream may result in a panic after this ++ /// function is called. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to complete this stream, and any I/O ++ /// errors which occur will be returned from this function. ++ pub fn try_finish(&mut self) -> io::Result<()> { ++ self.inner.finish() ++ } ++ ++ /// Consumes this encoder, flushing the output stream. ++ /// ++ /// This will flush the underlying data stream, close off the compressed ++ /// stream and, if successful, return the contained writer. ++ /// ++ /// Note that this function may not be suitable to call in a situation where ++ /// the underlying stream is an asynchronous I/O stream. To finish a stream ++ /// the `try_finish` (or `shutdown`) method should be used instead. To ++ /// re-acquire ownership of a stream it is safe to call this method after ++ /// `try_finish` or `shutdown` has returned `Ok`. 
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to complete this stream, and any I/O
++    /// errors which occur will be returned from this function.
++    pub fn finish(mut self) -> io::Result<W> {
++        self.inner.finish()?;
++        Ok(self.inner.take_inner())
++    }
++
++    /// Consumes this encoder, flushing the output stream.
++    ///
++    /// This will flush the underlying data stream and then return the contained
++    /// writer if the flush succeeded.
++    /// The compressed stream will not be closed, only flushed. This
++    /// means that the obtained byte array can be extended by another deflated
++    /// stream. To close the stream add the two bytes 0x3 and 0x0.
++    ///
++    /// # Errors
++    ///
++    /// This function will perform I/O to complete this stream, and any I/O
++    /// errors which occur will be returned from this function.
++    pub fn flush_finish(mut self) -> io::Result<W> {
++        self.inner.flush()?;
++        Ok(self.inner.take_inner())
++    }
++
++    /// Returns the number of bytes that have been written to this compressor.
++    ///
++    /// Note that not all bytes written to this object may be accounted for,
++    /// there may still be some active buffering.
++    pub fn total_in(&self) -> u64 {
++        self.inner.data.total_in()
++    }
++
++    /// Returns the number of bytes that the compressor has produced.
++    ///
++    /// Note that not all bytes may have been written yet, some may still be
++    /// buffered.
++    pub fn total_out(&self) -> u64 {
++        self.inner.data.total_out()
++    }
++}
++
++impl<W: Write> Write for ZlibEncoder<W> {
++    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
++        self.inner.write(buf)
++    }
++
++    fn flush(&mut self) -> io::Result<()> {
++        self.inner.flush()
++    }
++}
++
++#[cfg(feature = "tokio")]
++impl<W: AsyncWrite> AsyncWrite for ZlibEncoder<W> {
++    fn shutdown(&mut self) -> Poll<(), io::Error> {
++        try_nb!(self.try_finish());
++        self.get_mut().shutdown()
++    }
++}
++
++impl<W: Read + Write> Read for ZlibEncoder<W> {
++    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
++        self.get_mut().read(buf)
++    }
++}
++
++#[cfg(feature = "tokio")]
++impl<W: AsyncRead + AsyncWrite> AsyncRead for ZlibEncoder<W> {}
++
++/// A ZLIB decoder, or decompressor.
++///
++/// This structure implements a [`Write`] and will emit a stream of decompressed
++/// data when fed a stream of compressed data.
++///
++/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
++///
++/// # Examples
++///
++/// ```
++/// use std::io::prelude::*;
++/// use std::io;
++/// # use flate2::Compression;
++/// # use flate2::write::ZlibEncoder;
++/// use flate2::write::ZlibDecoder;
++///
++/// # fn main() {
++/// #    let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
++/// #    e.write_all(b"Hello World").unwrap();
++/// #    let bytes = e.finish().unwrap();
++/// #    println!("{}", decode_reader(bytes).unwrap());
++/// # }
++/// #
++/// // Uncompresses a Zlib Encoded vector of bytes and returns a string or error
++/// // Here Vec<u8> implements Write
++///
++/// fn decode_reader(bytes: Vec<u8>) -> io::Result<String> {
++///    let mut writer = Vec::new();
++///    let mut z = ZlibDecoder::new(writer);
++///    z.write_all(&bytes[..])?;
++///    writer = z.finish()?;
++///    let return_string = String::from_utf8(writer).expect("String parsing error");
++///    Ok(return_string)
++/// }
++/// ```
++#[derive(Debug)]
++pub struct ZlibDecoder<W: Write> {
++    inner: zio::Writer<W, Decompress>,
++}
++
++impl<W: Write> ZlibDecoder<W> {
++    /// Creates a new decoder which will write uncompressed data to the stream.
++    ///
++    /// When this decoder is dropped or unwrapped the final pieces of data will
++    /// be flushed.
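++    ///
++    /// (The decoder expects zlib-framed input: it is built around
++    /// `Decompress::new(true)`, as the constructor below shows.)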
++ pub fn new(w: W) -> ZlibDecoder { ++ ZlibDecoder { ++ inner: zio::Writer::new(w, Decompress::new(true)), ++ } ++ } ++ ++ /// Acquires a reference to the underlying writer. ++ pub fn get_ref(&self) -> &W { ++ self.inner.get_ref() ++ } ++ ++ /// Acquires a mutable reference to the underlying writer. ++ /// ++ /// Note that mutating the output/input state of the stream may corrupt this ++ /// object, so care must be taken when using this method. ++ pub fn get_mut(&mut self) -> &mut W { ++ self.inner.get_mut() ++ } ++ ++ /// Resets the state of this decoder entirely, swapping out the output ++ /// stream for another. ++ /// ++ /// This will reset the internal state of this decoder and replace the ++ /// output stream with the one provided, returning the previous output ++ /// stream. Future data written to this decoder will be decompressed into ++ /// the output stream `w`. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to complete this stream, and any I/O ++ /// errors which occur will be returned from this function. ++ pub fn reset(&mut self, w: W) -> io::Result { ++ self.inner.finish()?; ++ self.inner.data = Decompress::new(true); ++ Ok(self.inner.replace(w)) ++ } ++ ++ /// Attempt to finish this output stream, writing out final chunks of data. ++ /// ++ /// Note that this function can only be used once data has finished being ++ /// written to the output stream. After this function is called then further ++ /// calls to `write` may result in a panic. ++ /// ++ /// # Panics ++ /// ++ /// Attempts to write data to this stream may result in a panic after this ++ /// function is called. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to complete this stream, and any I/O ++ /// errors which occur will be returned from this function. ++ pub fn try_finish(&mut self) -> io::Result<()> { ++ self.inner.finish() ++ } ++ ++ /// Consumes this encoder, flushing the output stream. ++ /// ++ /// This will flush the underlying data stream and then return the contained ++ /// writer if the flush succeeded. ++ /// ++ /// Note that this function may not be suitable to call in a situation where ++ /// the underlying stream is an asynchronous I/O stream. To finish a stream ++ /// the `try_finish` (or `shutdown`) method should be used instead. To ++ /// re-acquire ownership of a stream it is safe to call this method after ++ /// `try_finish` or `shutdown` has returned `Ok`. ++ /// ++ /// # Errors ++ /// ++ /// This function will perform I/O to complete this stream, and any I/O ++ /// errors which occur will be returned from this function. ++ pub fn finish(mut self) -> io::Result { ++ self.inner.finish()?; ++ Ok(self.inner.take_inner()) ++ } ++ ++ /// Returns the number of bytes that the decompressor has consumed for ++ /// decompression. ++ /// ++ /// Note that this will likely be smaller than the number of bytes ++ /// successfully written to this stream due to internal buffering. ++ pub fn total_in(&self) -> u64 { ++ self.inner.data.total_in() ++ } ++ ++ /// Returns the number of bytes that the decompressor has written to its ++ /// output stream. 
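++    ///
++    /// (This counts what the internal `Decompress` has produced, which can
++    /// momentarily exceed what has actually reached the wrapped writer while
++    /// output is still sitting in the internal buffer.)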
++ pub fn total_out(&self) -> u64 { ++ self.inner.data.total_out() ++ } ++} ++ ++impl Write for ZlibDecoder { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.inner.write(buf) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.inner.flush() ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncWrite for ZlibDecoder { ++ fn shutdown(&mut self) -> Poll<(), io::Error> { ++ try_nb!(self.inner.finish()); ++ self.inner.get_mut().shutdown() ++ } ++} ++ ++impl Read for ZlibDecoder { ++ fn read(&mut self, buf: &mut [u8]) -> io::Result { ++ self.inner.get_mut().read(buf) ++ } ++} ++ ++#[cfg(feature = "tokio")] ++impl AsyncRead for ZlibDecoder {} diff --cc vendor/flate2-1.0.2/tests/corrupt-file.gz index 000000000,000000000..159333b03 new file mode 100644 Binary files differ diff --cc vendor/flate2-1.0.2/tests/early-flush.rs index 000000000,000000000..537e9e9ac new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/tests/early-flush.rs @@@ -1,0 -1,0 +1,20 @@@ ++extern crate flate2; ++ ++use std::io::{Read, Write}; ++ ++use flate2::write::GzEncoder; ++use flate2::read::GzDecoder; ++ ++#[test] ++fn smoke() { ++ let mut w = GzEncoder::new(Vec::new(), flate2::Compression::default()); ++ w.flush().unwrap(); ++ w.write_all(b"hello").unwrap(); ++ ++ let bytes = w.finish().unwrap(); ++ ++ let mut r = GzDecoder::new(&bytes[..]); ++ let mut s = String::new(); ++ r.read_to_string(&mut s).unwrap(); ++ assert_eq!(s, "hello"); ++} diff --cc vendor/flate2-1.0.2/tests/empty-read.rs index 000000000,000000000..755123833 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/tests/empty-read.rs @@@ -1,0 -1,0 +1,82 @@@ ++extern crate flate2; ++ ++use std::io::{Read, Write}; ++ ++#[test] ++fn deflate_decoder_empty_read() { ++ let original: &[u8] = b"Lorem ipsum dolor sit amet."; ++ let mut encoder = ++ flate2::write::DeflateEncoder::new(Vec::new(), flate2::Compression::default()); ++ encoder.write_all(original).unwrap(); ++ let encoded: Vec = encoder.finish().unwrap(); ++ let mut decoder = flate2::read::DeflateDecoder::new(encoded.as_slice()); ++ assert_eq!(decoder.read(&mut []).unwrap(), 0); ++ let mut decoded = Vec::new(); ++ decoder.read_to_end(&mut decoded).unwrap(); ++ assert_eq!(decoded.as_slice(), original); ++} ++ ++#[test] ++fn deflate_encoder_empty_read() { ++ let original: &[u8] = b"Lorem ipsum dolor sit amet."; ++ let mut encoder = flate2::read::DeflateEncoder::new(original, flate2::Compression::default()); ++ assert_eq!(encoder.read(&mut []).unwrap(), 0); ++ let mut encoded = Vec::new(); ++ encoder.read_to_end(&mut encoded).unwrap(); ++ let mut decoder = flate2::read::DeflateDecoder::new(encoded.as_slice()); ++ let mut decoded = Vec::new(); ++ decoder.read_to_end(&mut decoded).unwrap(); ++ assert_eq!(decoded.as_slice(), original); ++} ++ ++#[test] ++fn gzip_decoder_empty_read() { ++ let original: &[u8] = b"Lorem ipsum dolor sit amet."; ++ let mut encoder = flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default()); ++ encoder.write_all(original).unwrap(); ++ let encoded: Vec = encoder.finish().unwrap(); ++ let mut decoder = flate2::read::GzDecoder::new(encoded.as_slice()); ++ assert_eq!(decoder.read(&mut []).unwrap(), 0); ++ let mut decoded = Vec::new(); ++ decoder.read_to_end(&mut decoded).unwrap(); ++ assert_eq!(decoded.as_slice(), original); ++} ++ ++#[test] ++fn gzip_encoder_empty_read() { ++ let original: &[u8] = b"Lorem ipsum dolor sit amet."; ++ let mut encoder = flate2::read::GzEncoder::new(original, flate2::Compression::default()); ++ 
assert_eq!(encoder.read(&mut []).unwrap(), 0); ++ let mut encoded = Vec::new(); ++ encoder.read_to_end(&mut encoded).unwrap(); ++ let mut decoder = flate2::read::GzDecoder::new(encoded.as_slice()); ++ let mut decoded = Vec::new(); ++ decoder.read_to_end(&mut decoded).unwrap(); ++ assert_eq!(decoded.as_slice(), original); ++} ++ ++#[test] ++fn zlib_decoder_empty_read() { ++ let original: &[u8] = b"Lorem ipsum dolor sit amet."; ++ let mut encoder = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); ++ encoder.write_all(original).unwrap(); ++ let encoded: Vec = encoder.finish().unwrap(); ++ let mut decoder = flate2::read::ZlibDecoder::new(encoded.as_slice()); ++ assert_eq!(decoder.read(&mut []).unwrap(), 0); ++ let mut decoded = Vec::new(); ++ decoder.read_to_end(&mut decoded).unwrap(); ++ assert_eq!(decoded.as_slice(), original); ++} ++ ++#[test] ++fn zlib_encoder_empty_read() { ++ let original: &[u8] = b"Lorem ipsum dolor sit amet."; ++ let mut encoder = flate2::read::ZlibEncoder::new(original, flate2::Compression::default()); ++ assert_eq!(encoder.read(&mut []).unwrap(), 0); ++ let mut encoded = Vec::new(); ++ encoder.read_to_end(&mut encoded).unwrap(); ++ let mut decoder = flate2::read::ZlibDecoder::new(encoded.as_slice()); ++ let mut decoded = Vec::new(); ++ decoder.read_to_end(&mut decoded).unwrap(); ++ assert_eq!(decoded.as_slice(), original); ++} diff --cc vendor/flate2-1.0.2/tests/good-file.gz index 000000000,000000000..f968689cc new file mode 100644 Binary files differ diff --cc vendor/flate2-1.0.2/tests/good-file.txt index 000000000,000000000..ee39ac53d new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/tests/good-file.txt @@@ -1,0 -1,0 +1,733 @@@ ++## ## ++timestep simulated EIR patent hosts ++0 0.136402 16855 ++1 0.146872 18564 ++2 0.150157 20334 ++3 0.146358 22159 ++4 0.136315 23655 ++5 0.122354 24848 ++6 0.104753 25887 ++7 0.084439 26770 ++8 0.06417 27238 ++9 0.0450397 27349 ++10 0.0295473 27274 ++11 0.0184662 26909 ++12 0.0110032 26324 ++13 0.00634348 25513 ++14 0.0036144 24469 ++15 0.00208133 23383 ++16 0.00122468 22345 ++17 0.000752514 21342 ++18 0.000545333 20416 ++19 0.000546139 19657 ++20 0.00054572 18806 ++21 0.000545757 18015 ++22 0.000545898 17349 ++23 0.000546719 16594 ++24 0.000547353 15955 ++25 0.000547944 15374 ++26 0.000547606 14765 ++27 0.000594773 14212 ++28 0.000969163 13677 ++29 0.00168295 13180 ++30 0.003059 12760 ++31 0.00571599 12313 ++32 0.0107918 11896 ++33 0.0201943 11512 ++34 0.0368013 11340 ++35 0.0640629 11323 ++36 0.104447 11769 ++37 0.157207 12728 ++38 0.216682 14261 ++39 0.271159 16491 ++40 0.303552 19274 ++41 0.303678 22157 ++42 0.271945 24875 ++43 0.215445 27027 ++44 0.154503 28690 ++45 0.100717 30046 ++46 0.0600343 30602 ++47 0.0328576 30709 ++48 0.016964 30315 ++49 0.00841526 29310 ++50 0.0040958 28058 ++51 0.0019953 26662 ++52 0.000986531 25259 ++53 0.000545786 24049 ++54 0.000546405 22966 ++55 0.000546036 21933 ++56 0.00054427 20953 ++57 0.000542769 20057 ++58 0.000541566 19304 ++59 0.000541822 18477 ++60 0.000541643 17695 ++61 0.000541989 17002 ++62 0.000769298 16391 ++63 0.00150811 15805 ++64 0.00295097 15172 ++65 0.00566197 14690 ++66 0.0105243 14206 ++67 0.0186965 13791 ++68 0.0313363 13470 ++69 0.0490605 13377 ++70 0.0711679 13631 ++71 0.0953625 14209 ++72 0.118026 15277 ++73 0.134612 16760 ++74 0.144311 18339 ++75 0.146328 20124 ++76 0.142936 21803 ++77 0.134029 23435 ++78 0.120562 24854 ++79 0.103157 25880 ++80 0.0834054 26597 ++81 0.0632474 27226 ++82 0.0447785 27294 ++83 0.0295654 27169 ++84 
0.0184081 26803 ++85 0.0109489 26265 ++86 0.00631234 25375 ++87 0.00359978 24306 ++88 0.00206967 23260 ++89 0.00122197 22225 ++90 0.000751031 21277 ++91 0.000544507 20295 ++92 0.000543897 19417 ++93 0.000543483 18623 ++94 0.000542926 17837 ++95 0.000542685 17070 ++96 0.000542387 16424 ++97 0.000541194 15838 ++98 0.000540427 15177 ++99 0.000540774 14608 ++100 0.000588312 14066 ++101 0.000959183 13499 ++102 0.00166774 12979 ++103 0.00303278 12545 ++104 0.00567457 12067 ++105 0.0107272 11712 ++106 0.0200606 11368 ++107 0.0364637 11207 ++108 0.063339 11238 ++109 0.103717 11660 ++110 0.156884 12621 ++111 0.217072 14151 ++112 0.272311 16358 ++113 0.305046 19005 ++114 0.304927 21926 ++115 0.272427 24662 ++116 0.216478 27080 ++117 0.155168 29064 ++118 0.10079 30370 ++119 0.0599659 30992 ++120 0.0331287 30975 ++121 0.017235 30317 ++122 0.00860221 29455 ++123 0.00419286 28172 ++124 0.00203361 26809 ++125 0.000998847 25476 ++126 0.000551418 24230 ++127 0.000551119 23106 ++128 0.000552786 22147 ++129 0.000553814 21183 ++130 0.000553743 20280 ++131 0.000554428 19423 ++132 0.000555022 18598 ++133 0.000555921 17864 ++134 0.000556687 17187 ++135 0.000789996 16527 ++136 0.00154597 15870 ++137 0.00302776 15226 ++138 0.00581484 14685 ++139 0.010812 14234 ++140 0.0191832 13818 ++141 0.0321572 13571 ++142 0.050328 13538 ++143 0.072817 13812 ++144 0.0974321 14368 ++145 0.120225 15436 ++146 0.137418 16988 ++147 0.147086 18775 ++148 0.149165 20563 ++149 0.144943 22223 ++150 0.136631 23741 ++151 0.123355 24920 ++152 0.105401 25779 ++153 0.0851918 26781 ++154 0.0641702 27265 ++155 0.0450746 27505 ++156 0.0294136 27416 ++157 0.0183811 27028 ++158 0.0109285 26260 ++159 0.00634296 25451 ++160 0.00364513 24472 ++161 0.0021051 23427 ++162 0.00123693 22403 ++163 0.000759531 21393 ++164 0.000551727 20485 ++165 0.000552256 19660 ++166 0.000552303 18862 ++167 0.000550927 18094 ++168 0.000551098 17378 ++169 0.000551093 16691 ++170 0.000551885 16050 ++171 0.000552282 15420 ++172 0.000552591 14878 ++173 0.00060109 14357 ++174 0.000980446 13768 ++175 0.00170301 13241 ++176 0.003096 12745 ++177 0.00579971 12294 ++178 0.010976 11879 ++179 0.0205422 11636 ++180 0.0374515 11431 ++181 0.0649916 11517 ++182 0.106008 11966 ++183 0.159983 12918 ++184 0.221127 14484 ++185 0.276503 16696 ++186 0.310316 19518 ++187 0.311205 22301 ++188 0.276769 25047 ++189 0.220506 27360 ++190 0.159123 29133 ++191 0.103761 30440 ++192 0.0613797 31087 ++193 0.033583 31037 ++194 0.0173275 30555 ++195 0.00861968 29617 ++196 0.00419503 28292 ++197 0.00203304 26944 ++198 0.00100126 25569 ++199 0.000553511 24349 ++200 0.000554687 23257 ++201 0.00055586 22204 ++202 0.000555419 21176 ++203 0.000556032 20316 ++204 0.000555974 19509 ++205 0.000556859 18746 ++206 0.000556996 17978 ++207 0.000557102 17288 ++208 0.000790187 16672 ++209 0.00154711 16057 ++210 0.00303521 15449 ++211 0.00584201 14915 ++212 0.0108854 14397 ++213 0.0193386 14010 ++214 0.0324346 13730 ++215 0.0507192 13674 ++216 0.0736661 13874 ++217 0.0987887 14515 ++218 0.122411 15693 ++219 0.139964 17265 ++220 0.149125 18894 ++221 0.151434 20662 ++222 0.148067 22442 ++223 0.138894 24116 ++224 0.125436 25367 ++225 0.107664 26360 ++226 0.0865709 27044 ++227 0.0655588 27428 ++228 0.0459664 27714 ++229 0.0301384 27687 ++230 0.0186481 27262 ++231 0.01103 26677 ++232 0.00636957 25722 ++233 0.00366188 24662 ++234 0.00212213 23575 ++235 0.00125358 22520 ++236 0.000768665 21480 ++237 0.000556393 20563 ++238 0.000555892 19706 ++239 0.00055534 18914 ++240 0.000555027 18165 ++241 0.000555062 17432 ++242 0.000553766 
16733 ++243 0.000552984 16070 ++244 0.000553634 15396 ++245 0.000554286 14867 ++246 0.000603759 14362 ++247 0.000982974 13867 ++248 0.00170532 13379 ++249 0.00310471 12907 ++250 0.00582577 12446 ++251 0.0110122 12018 ++252 0.0206284 11730 ++253 0.0375835 11546 ++254 0.0652192 11605 ++255 0.10646 11981 ++256 0.160858 12949 ++257 0.223122 14478 ++258 0.279678 16810 ++259 0.312171 19452 ++260 0.311778 22391 ++261 0.276966 25204 ++262 0.22251 27379 ++263 0.159246 29248 ++264 0.104109 30532 ++265 0.0617903 30995 ++266 0.0338421 31042 ++267 0.0174647 30620 ++268 0.00867821 29589 ++269 0.00419968 28293 ++270 0.00203244 26916 ++271 0.00100204 25464 ++272 0.000555586 24219 ++273 0.000555599 23207 ++274 0.00055582 22187 ++275 0.00055516 21136 ++276 0.000555436 20243 ++277 0.000555618 19426 ++278 0.000556778 18635 ++279 0.000556976 17870 ++280 0.000557162 17190 ++281 0.0007904 16506 ++282 0.00154557 15837 ++283 0.00302973 15234 ++284 0.00584543 14717 ++285 0.0108796 14225 ++286 0.0192919 13810 ++287 0.032329 13605 ++288 0.0505293 13536 ++289 0.0733417 13760 ++290 0.0982413 14378 ++291 0.121477 15400 ++292 0.138636 17017 ++293 0.14875 18764 ++294 0.150515 20516 ++295 0.146372 22389 ++296 0.137332 23975 ++297 0.124076 25120 ++298 0.106469 26137 ++299 0.0862987 26973 ++300 0.0650552 27584 ++301 0.0456456 27741 ++302 0.0300744 27565 ++303 0.0187879 27212 ++304 0.0112085 26432 ++305 0.00648306 25501 ++306 0.00370346 24466 ++307 0.00213399 23472 ++308 0.00125463 22415 ++309 0.000765794 21427 ++310 0.000552587 20533 ++311 0.000553175 19632 ++312 0.000553525 18831 ++313 0.000554941 18119 ++314 0.000556327 17336 ++315 0.000556008 16721 ++316 0.00055593 16086 ++317 0.000556421 15516 ++318 0.000557308 14918 ++319 0.00060681 14402 ++320 0.000990746 13849 ++321 0.00172359 13355 ++322 0.00313688 12902 ++323 0.0058708 12425 ++324 0.0110637 12087 ++325 0.0206777 11743 ++326 0.0376394 11531 ++327 0.0656182 11582 ++328 0.107414 12034 ++329 0.162101 12955 ++330 0.223525 14571 ++331 0.279935 16842 ++332 0.314601 19566 ++333 0.313556 22575 ++334 0.279571 25279 ++335 0.221638 27642 ++336 0.158038 29275 ++337 0.102505 30638 ++338 0.0608328 31209 ++339 0.0335531 31260 ++340 0.0173332 30520 ++341 0.00861545 29604 ++342 0.00419454 28370 ++343 0.00202587 26940 ++344 0.000994029 25614 ++345 0.000549339 24445 ++346 0.000551477 23239 ++347 0.000552891 22300 ++348 0.000551775 21280 ++349 0.000552425 20424 ++350 0.000552135 19571 ++351 0.000552542 18753 ++352 0.000552863 18058 ++353 0.000554438 17348 ++354 0.000786735 16671 ++355 0.00153958 16047 ++356 0.00301482 15500 ++357 0.00580589 14883 ++358 0.0108227 14347 ++359 0.0192357 13947 ++360 0.0321613 13672 ++361 0.050229 13606 ++362 0.0729462 13815 ++363 0.0978564 14566 ++364 0.120879 15674 ++365 0.137663 17049 ++366 0.147092 18813 ++367 0.150184 20578 ++368 0.146971 22245 ++369 0.136769 23723 ++370 0.12367 24905 ++371 0.106187 25871 ++372 0.0860921 26687 ++373 0.0645899 27375 ++374 0.0453473 27635 ++375 0.0298122 27551 ++376 0.0185448 27134 ++377 0.0110517 26468 ++378 0.00640294 25661 ++379 0.00367011 24653 ++380 0.00211832 23556 ++381 0.00125246 22513 ++382 0.00076891 21568 ++383 0.000557384 20672 ++384 0.000557295 19811 ++385 0.000556837 18982 ++386 0.000557433 18179 ++387 0.000557376 17457 ++388 0.000557751 16720 ++389 0.000556844 16112 ++390 0.000555603 15479 ++391 0.000554871 14809 ++392 0.00060335 14275 ++393 0.000982808 13757 ++394 0.00170757 13221 ++395 0.00310351 12758 ++396 0.0058181 12286 ++397 0.010991 11906 ++398 0.0205342 11557 ++399 0.0373486 11393 ++400 0.0647659 
11487 ++401 0.105589 11887 ++402 0.15967 12798 ++403 0.220945 14260 ++404 0.277122 16477 ++405 0.310108 19295 ++406 0.308854 22110 ++407 0.274911 24915 ++408 0.218618 27273 ++409 0.156618 29189 ++410 0.101775 30572 ++411 0.0607503 31174 ++412 0.0334708 31316 ++413 0.0173443 30731 ++414 0.00865633 29636 ++415 0.00421141 28342 ++416 0.00204387 26991 ++417 0.00100602 25595 ++418 0.000555131 24336 ++419 0.000555037 23251 ++420 0.000555559 22267 ++421 0.000554916 21212 ++422 0.000554432 20306 ++423 0.000554751 19488 ++424 0.00055638 18727 ++425 0.000556727 17927 ++426 0.000556368 17198 ++427 0.000788004 16578 ++428 0.00154404 15944 ++429 0.00302383 15315 ++430 0.00582586 14786 ++431 0.0108457 14290 ++432 0.0192962 13815 ++433 0.0323072 13561 ++434 0.0505101 13456 ++435 0.0732162 13811 ++436 0.0978737 14403 ++437 0.121405 15460 ++438 0.138202 16993 ++439 0.1482 18710 ++440 0.149707 20578 ++441 0.146945 22256 ++442 0.137785 23713 ++443 0.123767 25058 ++444 0.105989 26087 ++445 0.085483 26759 ++446 0.0646144 27375 ++447 0.0454389 27680 ++448 0.0299337 27531 ++449 0.018663 27041 ++450 0.0111347 26416 ++451 0.00644197 25614 ++452 0.00369229 24666 ++453 0.00211986 23647 ++454 0.00124761 22650 ++455 0.000769104 21642 ++456 0.000558796 20693 ++457 0.000559908 19746 ++458 0.000559562 18952 ++459 0.00056042 18100 ++460 0.000559447 17401 ++461 0.000557893 16756 ++462 0.000557137 16148 ++463 0.000557269 15504 ++464 0.000557596 14974 ++465 0.000606298 14408 ++466 0.000987712 13909 ++467 0.00171257 13402 ++468 0.00311667 12891 ++469 0.00584794 12433 ++470 0.0110774 11980 ++471 0.0207006 11713 ++472 0.037673 11583 ++473 0.0654988 11677 ++474 0.106982 12072 ++475 0.161926 12898 ++476 0.224327 14548 ++477 0.281709 16796 ++478 0.314567 19512 ++479 0.313419 22428 ++480 0.278962 25186 ++481 0.221864 27755 ++482 0.158559 29556 ++483 0.103532 30572 ++484 0.0611592 31162 ++485 0.0337539 31197 ++486 0.0175096 30619 ++487 0.00865906 29606 ++488 0.00420125 28271 ++489 0.00203207 26856 ++490 0.00100238 25542 ++491 0.000554405 24306 ++492 0.00055373 23160 ++493 0.0005552 22152 ++494 0.000553776 21192 ++495 0.000553636 20302 ++496 0.000553165 19505 ++497 0.000554014 18719 ++498 0.00055519 17993 ++499 0.000556582 17233 ++500 0.000788165 16569 ++501 0.00154132 15953 ++502 0.00302099 15350 ++503 0.00581186 14752 ++504 0.0108291 14267 ++505 0.0192368 13946 ++506 0.0322191 13677 ++507 0.0503789 13594 ++508 0.0730706 13768 ++509 0.0980646 14416 ++510 0.121601 15634 ++511 0.139046 17110 ++512 0.147779 18876 ++513 0.149612 20734 ++514 0.145796 22414 ++515 0.136936 23884 ++516 0.123807 25078 ++517 0.106212 26066 ++518 0.0855482 26779 ++519 0.0643386 27340 ++520 0.0452926 27530 ++521 0.0298659 27573 ++522 0.0185447 27169 ++523 0.0110178 26489 ++524 0.00635235 25588 ++525 0.00362881 24549 ++526 0.00209238 23528 ++527 0.00123133 22541 ++528 0.000755917 21498 ++529 0.000546368 20607 ++530 0.000547382 19712 ++531 0.000547084 18975 ++532 0.000546453 18178 ++533 0.000546062 17452 ++534 0.000546085 16749 ++535 0.000546151 16135 ++536 0.000545628 15567 ++537 0.000545969 14968 ++538 0.000594606 14392 ++539 0.000968849 13854 ++540 0.00168489 13360 ++541 0.00306337 12899 ++542 0.00573505 12407 ++543 0.0108348 12017 ++544 0.02025 11713 ++545 0.0368201 11517 ++546 0.0639795 11556 ++547 0.104882 11941 ++548 0.158923 12854 ++549 0.219796 14396 ++550 0.275801 16733 ++551 0.307622 19367 ++552 0.30785 22230 ++553 0.272898 24873 ++554 0.217351 27152 ++555 0.156138 29108 ++556 0.101477 30379 ++557 0.0601091 30971 ++558 0.0331551 31126 ++559 0.017167 
30418 ++560 0.00853886 29430 ++561 0.00415201 28190 ++562 0.00201849 26849 ++563 0.000991957 25528 ++564 0.000546751 24180 ++565 0.00054534 23090 ++566 0.000544403 22096 ++567 0.00054368 21140 ++568 0.000543407 20213 ++569 0.000544421 19405 ++570 0.000545241 18625 ++571 0.000546995 17868 ++572 0.000547101 17102 ++573 0.00077428 16423 ++574 0.00151348 15783 ++575 0.00296212 15220 ++576 0.00569555 14602 ++577 0.0106307 14154 ++578 0.0188783 13743 ++579 0.0316572 13538 ++580 0.0495211 13467 ++581 0.0718936 13665 ++582 0.0961304 14240 ++583 0.119127 15341 ++584 0.136233 16912 ++585 0.145327 18567 ++586 0.146983 20301 ++587 0.143022 21953 ++588 0.134931 23439 ++589 0.121892 24750 ++590 0.103955 25688 ++591 0.0833804 26253 ++592 0.0625106 26918 ++593 0.0440419 27279 ++594 0.0290823 27159 ++595 0.0180758 26786 ++596 0.0107654 26049 ++597 0.00622673 25202 ++598 0.00356716 24168 ++599 0.00205866 23122 ++600 0.00121254 22076 ++601 0.000745744 21100 ++602 0.000537789 20207 ++603 0.000537982 19340 ++604 0.000537795 18527 ++605 0.000537955 17768 ++606 0.000539259 17117 ++607 0.00053942 16425 ++608 0.000540477 15701 ++609 0.000540424 15134 ++610 0.000540084 14558 ++611 0.00058571 14069 ++612 0.00095364 13498 ++613 0.00165505 13054 ++614 0.00300205 12616 ++615 0.00561724 12142 ++616 0.0106079 11720 ++617 0.0198178 11410 ++618 0.0360368 11231 ++619 0.0623418 11314 ++620 0.101856 11688 ++621 0.15376 12623 ++622 0.213046 14078 ++623 0.267285 16225 ++624 0.299225 18856 ++625 0.299517 21756 ++626 0.26697 24652 ++627 0.2119 27051 ++628 0.151393 28925 ++629 0.098869 30065 ++630 0.0593653 30570 ++631 0.0327177 30483 ++632 0.0170081 29735 ++633 0.0084493 28844 ++634 0.00409333 27665 ++635 0.00197466 26356 ++636 0.000967996 25009 ++637 0.000533137 23839 ++638 0.000532992 22721 ++639 0.000534258 21676 ++640 0.000534251 20709 ++641 0.000534556 19798 ++642 0.000535287 19008 ++643 0.000536214 18278 ++644 0.000536647 17547 ++645 0.000536556 16901 ++646 0.000761043 16256 ++647 0.00149108 15621 ++648 0.00292808 15032 ++649 0.0056527 14504 ++650 0.0105421 14010 ++651 0.0186823 13646 ++652 0.0312164 13356 ++653 0.0485643 13404 ++654 0.0704061 13612 ++655 0.0945219 14230 ++656 0.117178 15374 ++657 0.134568 16843 ++658 0.144475 18492 ++659 0.146915 20238 ++660 0.14393 21958 ++661 0.134621 23537 ++662 0.121737 24773 ++663 0.104744 25772 ++664 0.0846226 26427 ++665 0.0639754 27040 ++666 0.0448457 27279 ++667 0.029482 27106 ++668 0.0183036 26853 ++669 0.0108721 26178 ++670 0.00627116 25425 ++671 0.0035776 24326 ++672 0.00206466 23279 ++673 0.00122064 22191 ++674 0.000751578 21231 ++675 0.000542574 20323 ++676 0.000540396 19496 ++677 0.000538805 18651 ++678 0.00053881 17920 ++679 0.000537801 17217 ++680 0.000537866 16520 ++681 0.000538522 15876 ++682 0.000538795 15229 ++683 0.000539519 14656 ++684 0.000587348 14121 ++685 0.000955855 13626 ++686 0.00165656 13086 ++687 0.00301095 12666 ++688 0.00564993 12250 ++689 0.0106767 11869 ++690 0.0199729 11524 ++691 0.03641 11331 ++692 0.0632378 11402 ++693 0.103483 11788 ++694 0.156399 12682 ++695 0.215591 14337 ++696 0.269462 16547 ++697 0.303615 19239 ++698 0.304506 22023 ++699 0.273068 24769 ++700 0.21682 27223 ++701 0.154934 29029 ++702 0.100495 30241 ++703 0.0597382 30801 ++704 0.0329221 30881 ++705 0.0170591 30288 ++706 0.00845353 29329 ++707 0.00408176 28108 ++708 0.00198037 26715 ++709 0.000977102 25340 ++710 0.000541566 24039 ++711 0.000542333 22965 ++712 0.000542417 21858 ++713 0.000541182 20952 ++714 0.00054038 20049 ++715 0.000539725 19192 ++716 0.000539603 18409 ++717 
0.000539754 17700
++718 0.000539679 16960
++719 0.000763508 16287
++720 0.00149327 15637
++721 0.00292609 15057
++722 0.00563308 14524
++723 0.0104893 14003
++724 0.0185874 13625
++725 0.0310985 13319
++726 0.0487417 13278
++727 0.0707124 13502
++728 0.0947795 14147
++729 0.117155 15183
++730 0.133995 16622
diff --cc vendor/flate2-1.0.2/tests/gunzip.rs
index 000000000,000000000..c4628f9b8
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/tests/gunzip.rs
@@@ -1,0 -1,0 +1,77 @@@
++extern crate flate2;
++
++use std::fs::File;
++use std::io::prelude::*;
++use std::io::{self, BufReader};
++use std::path::Path;
++use flate2::read::GzDecoder;
++use flate2::read::MultiGzDecoder;
++
++// test extraction of a gzipped file
++#[test]
++fn test_extract_success() {
++    let content = extract_file(Path::new("tests/good-file.gz")).unwrap();
++    let mut expected = Vec::new();
++    File::open("tests/good-file.txt")
++        .unwrap()
++        .read_to_end(&mut expected)
++        .unwrap();
++    assert!(content == expected);
++}
++//
++// test partial extraction of a multistream gzipped file
++#[test]
++fn test_extract_success_partial_multi() {
++    let content = extract_file(Path::new("tests/multi.gz")).unwrap();
++    let mut expected = String::new();
++    BufReader::new(File::open("tests/multi.txt").unwrap())
++        .read_line(&mut expected)
++        .unwrap();
++    assert_eq!(content, expected.as_bytes());
++}
++
++// test extraction fails on a corrupt file
++#[test]
++fn test_extract_failure() {
++    let result = extract_file(Path::new("tests/corrupt-file.gz"));
++    assert_eq!(result.err().unwrap().kind(), io::ErrorKind::InvalidInput);
++}
++
++// test complete extraction of a multistream gzipped file
++#[test]
++fn test_extract_success_multi() {
++    let content = extract_file_multi(Path::new("tests/multi.gz")).unwrap();
++    let mut expected = Vec::new();
++    File::open("tests/multi.txt")
++        .unwrap()
++        .read_to_end(&mut expected)
++        .unwrap();
++    assert_eq!(content, expected);
++}
++
++// Tries to extract path into memory (assuming a .gz file).
++fn extract_file(path_compressed: &Path) -> io::Result<Vec<u8>> {
++    let mut v = Vec::new();
++    let f = File::open(path_compressed)?;
++    GzDecoder::new(f).read_to_end(&mut v)?;
++    Ok(v)
++}
++
++// Tries to extract path into memory (decompressing all members in case
++// of a multi member .gz file).
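++// (Editorial note, not upstream: a plain `GzDecoder` stops at the end of the
++// first gzip member, which is why `test_extract_success_partial_multi` above
++// sees only the first line of multi.txt; `MultiGzDecoder` keeps decoding
++// across member boundaries.)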
++fn extract_file_multi(path_compressed: &Path) -> io::Result<Vec<u8>> {
++    let mut v = Vec::new();
++    let f = File::open(path_compressed)?;
++    MultiGzDecoder::new(f).read_to_end(&mut v)?;
++    Ok(v)
++}
++
++#[test]
++fn empty_error_once() {
++    let data: &[u8] = &[];
++    let cbjson = GzDecoder::new(data);
++    let reader = BufReader::new(cbjson);
++    let mut stream = reader.lines();
++    assert!(stream.next().unwrap().is_err());
++    assert!(stream.next().is_none());
++}
diff --cc vendor/flate2-1.0.2/tests/multi.gz
index 000000000,000000000..cabc89630
new file mode 100644
Binary files differ
diff --cc vendor/flate2-1.0.2/tests/multi.txt
index 000000000,000000000..66a52ee7a
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/tests/multi.txt
@@@ -1,0 -1,0 +1,2 @@@
++first
++second
diff --cc vendor/flate2-1.0.2/tests/tokio.rs
index 000000000,000000000..0ce10647b
new file mode 100644
--- /dev/null
+++ b/vendor/flate2-1.0.2/tests/tokio.rs
@@@ -1,0 -1,0 +1,130 @@@
++#![cfg(feature = "tokio")]
++
++extern crate flate2;
++extern crate futures;
++extern crate rand;
++extern crate tokio_core;
++extern crate tokio_io;
++
++use std::thread;
++use std::net::{Shutdown, TcpListener};
++use std::io::{Read, Write};
++
++use flate2::Compression;
++use flate2::read;
++use flate2::write;
++use futures::Future;
++use rand::{thread_rng, Rng};
++use tokio_core::net::TcpStream;
++use tokio_core::reactor::Core;
++use tokio_io::AsyncRead;
++use tokio_io::io::{copy, shutdown};
++
++#[test]
++fn tcp_stream_echo_pattern() {
++    const N: u8 = 16;
++    const M: usize = 16 * 1024;
++
++    let mut core = Core::new().unwrap();
++    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
++    let addr = listener.local_addr().unwrap();
++    let t = thread::spawn(move || {
++        let a = listener.accept().unwrap().0;
++        let b = a.try_clone().unwrap();
++
++        let t = thread::spawn(move || {
++            let mut b = read::DeflateDecoder::new(b);
++            let mut buf = [0; M];
++            for i in 0..N {
++                b.read_exact(&mut buf).unwrap();
++                for byte in buf.iter() {
++                    assert_eq!(*byte, i);
++                }
++            }
++
++            assert_eq!(b.read(&mut buf).unwrap(), 0);
++        });
++
++        let mut a = write::ZlibEncoder::new(a, Compression::default());
++        for i in 0..N {
++            let buf = [i; M];
++            a.write_all(&buf).unwrap();
++        }
++        a.finish().unwrap().shutdown(Shutdown::Write).unwrap();
++
++        t.join().unwrap();
++    });
++
++    let handle = core.handle();
++    let stream = TcpStream::connect(&addr, &handle);
++    let copy = stream
++        .and_then(|s| {
++            let (a, b) = s.split();
++            let a = read::ZlibDecoder::new(a);
++            let b = write::DeflateEncoder::new(b, Compression::default());
++            copy(a, b)
++        })
++        .then(|result| {
++            let (amt, _a, b) = result.unwrap();
++            assert_eq!(amt, (N as u64) * (M as u64));
++            shutdown(b).map(|_| ())
++        });
++
++    core.run(copy).unwrap();
++    t.join().unwrap();
++}
++
++#[test]
++fn echo_random() {
++    let v = thread_rng()
++        .gen_iter::<u8>()
++        .take(1024 * 1024)
++        .collect::<Vec<_>>();
++    let mut core = Core::new().unwrap();
++    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
++    let addr = listener.local_addr().unwrap();
++    let v2 = v.clone();
++    let t = thread::spawn(move || {
++        let a = listener.accept().unwrap().0;
++        let b = a.try_clone().unwrap();
++
++        let mut v3 = v2.clone();
++        let t = thread::spawn(move || {
++            let mut b = read::DeflateDecoder::new(b);
++            let mut buf = [0; 1024];
++            while v3.len() > 0 {
++                let n = b.read(&mut buf).unwrap();
++                for (actual, expected) in buf[..n].iter().zip(&v3) {
++                    assert_eq!(*actual, *expected);
++                }
++                v3.drain(..n);
++            }
++
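++            // (Editorial note, not upstream: `Read::read` returning Ok(0)
++            // signals end of stream, so the assertion below checks that the
++            // decoder reaches a clean EOF once every byte has been verified.)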
assert_eq!(b.read(&mut buf).unwrap(), 0); ++ }); ++ ++ let mut a = write::ZlibEncoder::new(a, Compression::default()); ++ a.write_all(&v2).unwrap(); ++ a.finish().unwrap().shutdown(Shutdown::Write).unwrap(); ++ ++ t.join().unwrap(); ++ }); ++ ++ let handle = core.handle(); ++ let stream = TcpStream::connect(&addr, &handle); ++ let copy = stream ++ .and_then(|s| { ++ let (a, b) = s.split(); ++ let a = read::ZlibDecoder::new(a); ++ let b = write::DeflateEncoder::new(b, Compression::default()); ++ copy(a, b) ++ }) ++ .then(|result| { ++ let (amt, _a, b) = result.unwrap(); ++ assert_eq!(amt, v.len() as u64); ++ shutdown(b).map(|_| ()) ++ }); ++ ++ core.run(copy).unwrap(); ++ t.join().unwrap(); ++} diff --cc vendor/flate2-1.0.2/tests/zero-write.rs index 000000000,000000000..f0db86cb8 new file mode 100644 --- /dev/null +++ b/vendor/flate2-1.0.2/tests/zero-write.rs @@@ -1,0 -1,0 +1,8 @@@ ++extern crate flate2; ++ ++#[test] ++fn zero_write_is_error() { ++ let mut buf = [0u8]; ++ let writer = flate2::write::DeflateEncoder::new(&mut buf[..], flate2::Compression::default()); ++ assert!(writer.finish().is_err()); ++} diff --cc vendor/git2-0.7.5/.cargo-checksum.json index 000000000,000000000..6859a82fb new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"591f8be1674b421644b6c030969520bc3fa12114d2eb467471982ed3e9584e71"} diff --cc vendor/git2-0.7.5/.gitmodules index 000000000,000000000..95c45cce4 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/.gitmodules @@@ -1,0 -1,0 +1,3 @@@ ++[submodule "libgit2-sys/libgit2"] ++ path = libgit2-sys/libgit2 ++ url = https://github.com/libgit2/libgit2 diff --cc vendor/git2-0.7.5/.travis.yml index 000000000,000000000..10e97eafb new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/.travis.yml @@@ -1,0 -1,0 +1,54 @@@ ++language: rust ++sudo: false ++git: ++ submodules: false ++ ++matrix: ++ include: ++ - rust: 1.21.0 ++ - rust: stable ++ - os: osx ++ rust: stable ++ before_install: ++ - export OPENSSL_INCLUDE_DIR=`brew --prefix openssl`/include ++ - export OPENSSL_LIB_DIR=`brew --prefix openssl`/lib ++ - rust: beta ++ - rust: nightly ++ ++ - rust: nightly ++ before_script: ++ - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH ++ script: ++ - cargo doc --no-deps ++ - cargo doc --manifest-path=git2-curl/Cargo.toml --no-deps ++ - cargo doc --manifest-path=libgit2-sys/Cargo.toml --no-deps ++ after_success: ++ - travis-cargo --only nightly doc-upload ++ ++script: ++ - git submodule update --init ++ - cargo test --no-default-features ++ - cargo test ++ - cargo run --manifest-path systest/Cargo.toml --release ++ - if [ "$TRAVIS_RUST_VERSION" = "nightly" ]; then ++ cargo test --features unstable; ++ cargo test --manifest-path git2-curl/Cargo.toml; ++ fi ++ ++env: ++ global: ++ secure: "SVk5cv4VnBQAoaBXt9pIHk+FQ7Z58zT5EaPo7Ac81LltKztwHovhN/R1otKzgrAJqFsZ/nKR4cGyQGbYtfVJcsqweQVM75LI6Oh6lYyEdfX211ZI3SWQ50JO93CmwLtanC5UpECdXvJLCgXrHGJXuL1oi7hySGy47/yQlKH6eaM=" ++ ++notifications: ++ email: ++ on_success: never ++ ++addons: ++ apt: ++ sources: ++ - kalakris-cmake ++ packages: ++ - cmake ++ - libcurl4-openssl-dev ++ - libelf-dev ++ - libdw-dev diff --cc vendor/git2-0.7.5/Cargo.toml index 000000000,000000000..84699d7b4 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/Cargo.toml @@@ -1,0 -1,0 +1,75 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for 
maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "git2" ++version = "0.7.5" ++authors = ["Alex Crichton "] ++description = "Bindings to libgit2 for interoperating with git repositories. This library is\nboth threadsafe and memory safe and allows both reading and writing git\nrepositories.\n" ++homepage = "https://github.com/alexcrichton/git2-rs" ++documentation = "https://docs.rs/git2" ++readme = "README.md" ++keywords = ["git"] ++categories = ["api-bindings"] ++license = "MIT/Apache-2.0" ++repository = "https://github.com/alexcrichton/git2-rs" ++[dependencies.bitflags] ++version = "1.0" ++ ++[dependencies.libc] ++version = "0.2" ++ ++[dependencies.libgit2-sys] ++version = "0.7.7" ++ ++[dependencies.log] ++version = "0.4" ++ ++[dependencies.url] ++version = "1.0" ++[dev-dependencies.docopt] ++version = "1.0" ++ ++[dev-dependencies.serde] ++version = "1.0" ++ ++[dev-dependencies.serde_derive] ++version = "1.0" ++ ++[dev-dependencies.tempdir] ++version = "0.3.7" ++ ++[dev-dependencies.thread-id] ++version = "3.3.0" ++ ++[dev-dependencies.time] ++version = "0.1.39" ++ ++[features] ++curl = ["libgit2-sys/curl"] ++default = ["ssh", "https", "curl", "ssh_key_from_memory"] ++https = ["libgit2-sys/https", "openssl-sys", "openssl-probe"] ++ssh = ["libgit2-sys/ssh"] ++ssh_key_from_memory = ["libgit2-sys/ssh_key_from_memory"] ++unstable = [] ++[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-probe] ++version = "0.1" ++optional = true ++ ++[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-sys] ++version = "0.9.0" ++optional = true ++[badges.appveyor] ++repository = "alexcrichton/git2-rs" ++ ++[badges.travis-ci] ++repository = "alexcrichton/git2-rs" diff --cc vendor/git2-0.7.5/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. 
++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. ++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. 
You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. 
In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. diff --cc vendor/git2-0.7.5/LICENSE-MIT index 000000000,000000000..39e0ed660 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/LICENSE-MIT @@@ -1,0 -1,0 +1,25 @@@ ++Copyright (c) 2014 Alex Crichton ++ ++Permission is hereby granted, free of charge, to any ++person obtaining a copy of this software and associated ++documentation files (the "Software"), to deal in the ++Software without restriction, including without ++limitation the rights to use, copy, modify, merge, ++publish, distribute, sublicense, and/or sell copies of ++the Software, and to permit persons to whom the Software ++is furnished to do so, subject to the following ++conditions: ++ ++The above copyright notice and this permission notice ++shall be included in all copies or substantial portions ++of the Software. 
++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED ++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A ++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT ++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR ++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++DEALINGS IN THE SOFTWARE. diff --cc vendor/git2-0.7.5/README.md index 000000000,000000000..b060602d3 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/README.md @@@ -1,0 -1,0 +1,71 @@@ ++# git2-rs ++ ++[![Build Status](https://travis-ci.org/alexcrichton/git2-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/git2-rs) ++[![Build Status](https://ci.appveyor.com/api/projects/status/6vem3xgno2kuxnfm?svg=true)](https://ci.appveyor.com/project/alexcrichton/git2-rs) ++ ++[Documentation](https://docs.rs/git2) ++ ++libgit2 bindings for Rust ++ ++```toml ++[dependencies] ++git2 = "0.7" ++``` ++ ++## Version of libgit2 ++ ++Currently this library requires libgit2 0.25.1. The source for libgit2 is ++included in the libgit2-sys crate so there's no need to pre-install the libgit2 ++library, the libgit2-sys crate will figure that and/or build that for you. ++ ++## Building git2-rs ++ ++First, you'll need to install _CMake_. Afterwards, just run: ++ ++```sh ++$ git clone https://github.com/alexcrichton/git2-rs ++$ cd git2-rs ++$ cargo build ++``` ++ ++### Automating Testing ++ ++Running tests and handling all of the associated edge cases on every commit ++proves tedious very quickly. To automate tests and handle proper stashing and ++unstashing of unstaged changes and thus avoid nasty surprises, use the ++pre-commit hook found [here][pre-commit-hook] and place it into the ++`.git/hooks/` with the name `pre-commit`. You may need to add execution ++permissions with `chmod +x`. ++ ++ ++To skip tests on a simple commit or doc-fixes, use `git commit --no-verify`. ++ ++## Building on OSX 10.10+ ++ ++Currently libssh2 requires linking against OpenSSL, and to compile libssh2 it ++also needs to find the OpenSSL headers. On OSX 10.10+ the OpenSSL headers have ++been removed, but if you're using Homebrew you can install them via: ++ ++```sh ++brew install openssl ++``` ++ ++ ++# License ++ ++This project is licensed under either of ++ ++ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or ++ http://www.apache.org/licenses/LICENSE-2.0) ++ * MIT license ([LICENSE-MIT](LICENSE-MIT) or ++ http://opensource.org/licenses/MIT) ++ ++at your option. ++ ++### Contribution ++ ++Unless you explicitly state otherwise, any contribution intentionally submitted ++for inclusion in git2-rs by you, as defined in the Apache-2.0 license, shall be ++dual licensed as above, without any additional terms or conditions. 
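++
++## A first example
++
++As a quick orientation (an illustrative sketch added by the editor, not part
++of the upstream README), the crate can be exercised with the dependency shown
++above. `Repository::open` and `Reference::shorthand` are part of the public
++API; the `run`/`main` split mirrors the bundled examples:
++
++```rust
++extern crate git2;
++
++use git2::Repository;
++
++fn run() -> Result<(), git2::Error> {
++    // Illustrative only: assumes this runs at the root of a git repository
++    // (use Repository::discover to search parent directories instead).
++    let repo = try!(Repository::open("."));
++    let head = try!(repo.head());
++    println!("HEAD points at {}", head.shorthand().unwrap_or("(detached)"));
++    Ok(())
++}
++
++fn main() {
++    match run() {
++        Ok(()) => {}
++        Err(e) => println!("error: {}", e),
++    }
++}
++```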
++ ++[pre-commit-hook]: https://gist.github.com/glfmn/0c5e9e2b41b48007ed3497d11e3dbbfa diff --cc vendor/git2-0.7.5/appveyor.yml index 000000000,000000000..0182bfd8b new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/appveyor.yml @@@ -1,0 -1,0 +1,19 @@@ ++environment: ++ matrix: ++ - TARGET: x86_64-pc-windows-msvc ++ - TARGET: i686-pc-windows-msvc ++install: ++ - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" ++ - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" ++ - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin ++ - if defined MSYS_BITS set PATH=C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin;%PATH% ++ - set CARGO_TARGET_DIR=%APPVEYOR_BUILD_FOLDER%\target ++ - rustc -V ++ - cargo -V ++ ++build: false ++ ++test_script: ++ - cargo test --target %TARGET% ++ - cargo test --no-default-features --target %TARGET% ++ - cargo run --manifest-path systest/Cargo.toml --target %TARGET% diff --cc vendor/git2-0.7.5/examples/add.rs index 000000000,000000000..3167cb074 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/add.rs @@@ -1,0 -1,0 +1,85 @@@ ++/* ++ * libgit2 "add" example - shows how to modify the index ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++#![allow(trivial_casts)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use std::path::Path; ++use docopt::Docopt; ++use git2::Repository; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_spec: Vec, ++ flag_dry_run: bool, ++ flag_verbose: bool, ++ flag_update: bool, ++} ++ ++fn run(args: &Args) -> Result<(), git2::Error> { ++ let repo = try!(Repository::open(&Path::new("."))); ++ let mut index = try!(repo.index()); ++ ++ let cb = &mut |path: &Path, _matched_spec: &[u8]| -> i32 { ++ let status = repo.status_file(path).unwrap(); ++ ++ let ret = if status.contains(git2::Status::WT_MODIFIED) || ++ status.contains(git2::Status::WT_NEW) { ++ println!("add '{}'", path.display()); ++ 0 ++ } else { ++ 1 ++ }; ++ ++ if args.flag_dry_run {1} else {ret} ++ }; ++ let cb = if args.flag_verbose || args.flag_update { ++ Some(cb as &mut git2::IndexMatchedPath) ++ } else { ++ None ++ }; ++ ++ if args.flag_update { ++ try!(index.update_all(args.arg_spec.iter(), cb)); ++ } else { ++ try!(index.add_all(args.arg_spec.iter(), git2::IndexAddOption::DEFAULT, cb)); ++ } ++ ++ try!(index.write()); ++ Ok(()) ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: add [options] [--] [..] 
++ ++Options: ++ -n, --dry-run dry run ++ -v, --verbose be verbose ++ -u, --update update tracked files ++ -h, --help show this message ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/blame.rs index 000000000,000000000..5f7bee368 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/blame.rs @@@ -1,0 -1,0 +1,106 @@@ ++/* ++ * libgit2 "blame" example - shows how to use the blame API ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use docopt::Docopt; ++use git2::{Repository, BlameOptions}; ++use std::path::Path; ++use std::io::{BufReader, BufRead}; ++ ++#[derive(Deserialize)] #[allow(non_snake_case)] ++struct Args { ++ arg_path: String, ++ arg_spec: Option, ++ flag_M: bool, ++ flag_C: bool, ++ flag_F: bool, ++} ++ ++fn run(args: &Args) -> Result<(), git2::Error> { ++ let repo = try!(Repository::open(".")); ++ let path = Path::new(&args.arg_path[..]); ++ ++ // Prepare our blame options ++ let mut opts = BlameOptions::new(); ++ opts.track_copies_same_commit_moves(args.flag_M) ++ .track_copies_same_commit_copies(args.flag_C) ++ .first_parent(args.flag_F); ++ ++ let mut commit_id = "HEAD".to_string(); ++ ++ // Parse spec ++ if let Some(spec) = args.arg_spec.as_ref() { ++ ++ let revspec = try!(repo.revparse(spec)); ++ ++ let (oldest, newest) = if revspec.mode().contains(git2::RevparseMode::SINGLE) { ++ (None, revspec.from()) ++ } else if revspec.mode().contains(git2::RevparseMode::RANGE) { ++ (revspec.from(), revspec.to()) ++ } else { ++ (None, None) ++ }; ++ ++ if let Some(commit) = oldest { ++ opts.oldest_commit(commit.id()); ++ } ++ ++ if let Some(commit) = newest { ++ opts.newest_commit(commit.id()); ++ if !commit.id().is_zero() { ++ commit_id = format!("{}", commit.id()) ++ } ++ } ++ ++ } ++ ++ let spec = format!("{}:{}", commit_id, path.display()); ++ let blame = try!(repo.blame_file(path, Some(&mut opts))); ++ let object = try!(repo.revparse_single(&spec[..])); ++ let blob = try!(repo.find_blob(object.id())); ++ let reader = BufReader::new(blob.content()); ++ ++ for (i, line) in reader.lines().enumerate() { ++ if let (Ok(line), Some(hunk)) = (line, blame.get_line(i+1)) { ++ let sig = hunk.final_signature(); ++ println!("{} {} <{}> {}", hunk.final_commit_id(), ++ String::from_utf8_lossy(sig.name_bytes()), ++ String::from_utf8_lossy(sig.email_bytes()), line); ++ } ++ } ++ ++ Ok(()) ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: blame [options] [] ++ ++Options: ++ -M find line moves within and across files ++ -C find line copies within and across files ++ -F follow only the first parent commits ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/cat-file.rs index 000000000,000000000..68117bd88 new file mode 100644 --- /dev/null +++ 
b/vendor/git2-0.7.5/examples/cat-file.rs @@@ -1,0 -1,0 +1,142 @@@ ++/* ++ * libgit2 "cat-file" example - shows how to print data from the ODB ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use std::io::{self, Write}; ++ ++use docopt::Docopt; ++use git2::{Repository, ObjectType, Blob, Commit, Signature, Tag, Tree}; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_object: String, ++ flag_t: bool, ++ flag_s: bool, ++ flag_e: bool, ++ flag_p: bool, ++ flag_q: bool, ++ flag_v: bool, ++ flag_git_dir: Option, ++} ++ ++fn run(args: &Args) -> Result<(), git2::Error> { ++ let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); ++ let repo = try!(Repository::open(path)); ++ ++ let obj = try!(repo.revparse_single(&args.arg_object)); ++ if args.flag_v && !args.flag_q { ++ println!("{} {}\n--", obj.kind().unwrap().str(), obj.id()); ++ } ++ ++ if args.flag_t { ++ println!("{}", obj.kind().unwrap().str()); ++ } else if args.flag_s || args.flag_e { ++ /* ... */ ++ } else if args.flag_p { ++ match obj.kind() { ++ Some(ObjectType::Blob) => { ++ show_blob(obj.as_blob().unwrap()); ++ } ++ Some(ObjectType::Commit) => { ++ show_commit(obj.as_commit().unwrap()); ++ } ++ Some(ObjectType::Tag) => { ++ show_tag(obj.as_tag().unwrap()); ++ } ++ Some(ObjectType::Tree) => { ++ show_tree(obj.as_tree().unwrap()); ++ } ++ Some(ObjectType::Any) | None => { ++ println!("unknown {}", obj.id()) ++ } ++ } ++ } ++ Ok(()) ++} ++ ++fn show_blob(blob: &Blob) { ++ io::stdout().write_all(blob.content()).unwrap(); ++} ++ ++fn show_commit(commit: &Commit) { ++ println!("tree {}", commit.tree_id()); ++ for parent in commit.parent_ids() { ++ println!("parent {}", parent); ++ } ++ show_sig("author", Some(commit.author())); ++ show_sig("committer", Some(commit.committer())); ++ if let Some(msg) = commit.message() { ++ println!("\n{}", msg); ++ } ++} ++ ++fn show_tag(tag: &Tag) { ++ println!("object {}", tag.target_id()); ++ println!("type {}", tag.target_type().unwrap().str()); ++ println!("tag {}", tag.name().unwrap()); ++ show_sig("tagger", tag.tagger()); ++ ++ if let Some(msg) = tag.message() { ++ println!("\n{}", msg); ++ } ++} ++ ++fn show_tree(tree: &Tree) { ++ for entry in tree.iter() { ++ println!("{:06o} {} {}\t{}", ++ entry.filemode(), ++ entry.kind().unwrap().str(), ++ entry.id(), ++ entry.name().unwrap()); ++ } ++} ++ ++fn show_sig(header: &str, sig: Option) { ++ let sig = match sig { Some(s) => s, None => return }; ++ let offset = sig.when().offset_minutes(); ++ let (sign, offset) = if offset < 0 {('-', -offset)} else {('+', offset)}; ++ let (hours, minutes) = (offset / 60, offset % 60); ++ println!("{} {} {} {}{:02}{:02}", ++ header, sig, sig.when().seconds(), sign, hours, minutes); ++ ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: cat-file (-t | -s | -e | -p) [options] ++ ++Options: ++ -t show the object type ++ -s show the object size ++ -e suppress all output ++ -p pretty print the contents of the object ++ -q suppress output ++ -v use verbose output ++ --git-dir use the specified directory as the base directory ++ 
-h, --help show this message ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/clone.rs index 000000000,000000000..3bd72bc2e new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/clone.rs @@@ -1,0 -1,0 +1,124 @@@ ++/* ++ * libgit2 "clone" example ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use docopt::Docopt; ++use git2::build::{RepoBuilder, CheckoutBuilder}; ++use git2::{RemoteCallbacks, Progress, FetchOptions}; ++use std::cell::RefCell; ++use std::io::{self, Write}; ++use std::path::{Path, PathBuf}; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_url: String, ++ arg_path: String, ++} ++ ++struct State { ++ progress: Option>, ++ total: usize, ++ current: usize, ++ path: Option, ++ newline: bool, ++} ++ ++fn print(state: &mut State) { ++ let stats = state.progress.as_ref().unwrap(); ++ let network_pct = (100 * stats.received_objects()) / stats.total_objects(); ++ let index_pct = (100 * stats.indexed_objects()) / stats.total_objects(); ++ let co_pct = if state.total > 0 { ++ (100 * state.current) / state.total ++ } else { ++ 0 ++ }; ++ let kbytes = stats.received_bytes() / 1024; ++ if stats.received_objects() == stats.total_objects() { ++ if !state.newline { ++ println!(""); ++ state.newline = true; ++ } ++ print!("Resolving deltas {}/{}\r", stats.indexed_deltas(), ++ stats.total_deltas()); ++ } else { ++ print!("net {:3}% ({:4} kb, {:5}/{:5}) / idx {:3}% ({:5}/{:5}) \ ++ / chk {:3}% ({:4}/{:4}) {}\r", ++ network_pct, kbytes, stats.received_objects(), ++ stats.total_objects(), ++ index_pct, stats.indexed_objects(), stats.total_objects(), ++ co_pct, state.current, state.total, ++ state.path ++ .as_ref() ++ .map(|s| s.to_string_lossy().into_owned()) ++ .unwrap_or_default()) ++ } ++ io::stdout().flush().unwrap(); ++} ++ ++fn run(args: &Args) -> Result<(), git2::Error> { ++ let state = RefCell::new(State { ++ progress: None, ++ total: 0, ++ current: 0, ++ path: None, ++ newline: false, ++ }); ++ let mut cb = RemoteCallbacks::new(); ++ cb.transfer_progress(|stats| { ++ let mut state = state.borrow_mut(); ++ state.progress = Some(stats.to_owned()); ++ print(&mut *state); ++ true ++ }); ++ ++ let mut co = CheckoutBuilder::new(); ++ co.progress(|path, cur, total| { ++ let mut state = state.borrow_mut(); ++ state.path = path.map(|p| p.to_path_buf()); ++ state.current = cur; ++ state.total = total; ++ print(&mut *state); ++ }); ++ ++ let mut fo = FetchOptions::new(); ++ fo.remote_callbacks(cb); ++ try!(RepoBuilder::new().fetch_options(fo).with_checkout(co) ++ .clone(&args.arg_url, Path::new(&args.arg_path))); ++ println!(""); ++ ++ Ok(()) ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: add [options] ++ ++Options: ++ -h, --help show this message ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: 
{}", e), ++ } ++} ++ diff --cc vendor/git2-0.7.5/examples/diff.rs index 000000000,000000000..8664a0e30 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/diff.rs @@@ -1,0 -1,0 +1,284 @@@ ++/* ++ * libgit2 "diff" example - shows how to use the diff API ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use std::str; ++ ++use docopt::Docopt; ++use git2::{Repository, Error, Object, ObjectType, DiffOptions, Diff}; ++use git2::{DiffFindOptions, DiffFormat}; ++ ++#[derive(Deserialize)] #[allow(non_snake_case)] ++struct Args { ++ arg_from_oid: Option, ++ arg_to_oid: Option, ++ flag_patch: bool, ++ flag_cached: bool, ++ flag_nocached: bool, ++ flag_name_only: bool, ++ flag_name_status: bool, ++ flag_raw: bool, ++ flag_format: Option, ++ flag_color: bool, ++ flag_no_color: bool, ++ flag_R: bool, ++ flag_text: bool, ++ flag_ignore_space_at_eol: bool, ++ flag_ignore_space_change: bool, ++ flag_ignore_all_space: bool, ++ flag_ignored: bool, ++ flag_untracked: bool, ++ flag_patience: bool, ++ flag_minimal: bool, ++ flag_stat: bool, ++ flag_numstat: bool, ++ flag_shortstat: bool, ++ flag_summary: bool, ++ flag_find_renames: Option, ++ flag_find_copies: Option, ++ flag_find_copies_harder: bool, ++ flag_break_rewrites: bool, ++ flag_unified: Option, ++ flag_inter_hunk_context: Option, ++ flag_abbrev: Option, ++ flag_src_prefix: Option, ++ flag_dst_prefix: Option, ++ flag_git_dir: Option, ++} ++ ++const RESET: &'static str = "\u{1b}[m"; ++const BOLD: &'static str = "\u{1b}[1m"; ++const RED: &'static str = "\u{1b}[31m"; ++const GREEN: &'static str = "\u{1b}[32m"; ++const CYAN: &'static str = "\u{1b}[36m"; ++ ++#[derive(PartialEq, Eq, Copy, Clone)] ++enum Cache { Normal, Only, None } ++ ++fn run(args: &Args) -> Result<(), Error> { ++ let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); ++ let repo = try!(Repository::open(path)); ++ ++ // Prepare our diff options based on the arguments given ++ let mut opts = DiffOptions::new(); ++ opts.reverse(args.flag_R) ++ .force_text(args.flag_text) ++ .ignore_whitespace_eol(args.flag_ignore_space_at_eol) ++ .ignore_whitespace_change(args.flag_ignore_space_change) ++ .ignore_whitespace(args.flag_ignore_all_space) ++ .include_ignored(args.flag_ignored) ++ .include_untracked(args.flag_untracked) ++ .patience(args.flag_patience) ++ .minimal(args.flag_minimal); ++ if let Some(amt) = args.flag_unified { opts.context_lines(amt); } ++ if let Some(amt) = args.flag_inter_hunk_context { opts.interhunk_lines(amt); } ++ if let Some(amt) = args.flag_abbrev { opts.id_abbrev(amt); } ++ if let Some(ref s) = args.flag_src_prefix { opts.old_prefix(&s); } ++ if let Some(ref s) = args.flag_dst_prefix { opts.new_prefix(&s); } ++ if let Some("diff-index") = args.flag_format.as_ref().map(|s| &s[..]) { ++ opts.id_abbrev(40); ++ } ++ ++ // Prepare the diff to inspect ++ let t1 = try!(tree_to_treeish(&repo, args.arg_from_oid.as_ref())); ++ let t2 = try!(tree_to_treeish(&repo, args.arg_to_oid.as_ref())); ++ let head = try!(tree_to_treeish(&repo, Some(&"HEAD".to_string()))).unwrap(); ++ let 
mut diff = match (t1, t2, args.cache()) { ++ (Some(t1), Some(t2), _) => { ++ try!(repo.diff_tree_to_tree(t1.as_tree(), t2.as_tree(), ++ Some(&mut opts))) ++ } ++ (t1, None, Cache::None) => { ++ let t1 = t1.unwrap_or(head); ++ try!(repo.diff_tree_to_workdir(t1.as_tree(), Some(&mut opts))) ++ } ++ (t1, None, Cache::Only) => { ++ let t1 = t1.unwrap_or(head); ++ try!(repo.diff_tree_to_index(t1.as_tree(), None, Some(&mut opts))) ++ } ++ (Some(t1), None, _) => { ++ try!(repo.diff_tree_to_workdir_with_index(t1.as_tree(), ++ Some(&mut opts))) ++ } ++ (None, None, _) => { ++ try!(repo.diff_index_to_workdir(None, Some(&mut opts))) ++ } ++ (None, Some(_), _) => unreachable!(), ++ }; ++ ++ // Apply rename and copy detection if requested ++ if args.flag_break_rewrites || args.flag_find_copies_harder || ++ args.flag_find_renames.is_some() || args.flag_find_copies.is_some() ++ { ++ let mut opts = DiffFindOptions::new(); ++ if let Some(t) = args.flag_find_renames { ++ opts.rename_threshold(t); ++ opts.renames(true); ++ } ++ if let Some(t) = args.flag_find_copies { ++ opts.copy_threshold(t); ++ opts.copies(true); ++ } ++ opts.copies_from_unmodified(args.flag_find_copies_harder) ++ .rewrites(args.flag_break_rewrites); ++ try!(diff.find_similar(Some(&mut opts))); ++ } ++ ++ // Generate simple output ++ let stats = args.flag_stat | args.flag_numstat | args.flag_shortstat | ++ args.flag_summary; ++ if stats { ++ try!(print_stats(&diff, args)); ++ } ++ if args.flag_patch || !stats { ++ if args.color() { print!("{}", RESET); } ++ let mut last_color = None; ++ try!(diff.print(args.diff_format(), |_delta, _hunk, line| { ++ if args.color() { ++ let next = match line.origin() { ++ '+' => Some(GREEN), ++ '-' => Some(RED), ++ '>' => Some(GREEN), ++ '<' => Some(RED), ++ 'F' => Some(BOLD), ++ 'H' => Some(CYAN), ++ _ => None ++ }; ++ if args.color() && next != last_color { ++ if last_color == Some(BOLD) || next == Some(BOLD) { ++ print!("{}", RESET); ++ } ++ print!("{}", next.unwrap_or(RESET)); ++ last_color = next; ++ } ++ } ++ ++ match line.origin() { ++ '+' | '-' | ' ' => print!("{}", line.origin()), ++ _ => {} ++ } ++ print!("{}", str::from_utf8(line.content()).unwrap()); ++ true ++ })); ++ if args.color() { print!("{}", RESET); } ++ } ++ ++ Ok(()) ++} ++ ++fn print_stats(diff: &Diff, args: &Args) -> Result<(), Error> { ++ let stats = try!(diff.stats()); ++ let mut format = git2::DiffStatsFormat::NONE; ++ if args.flag_stat { ++ format |= git2::DiffStatsFormat::FULL; ++ } ++ if args.flag_shortstat { ++ format |= git2::DiffStatsFormat::SHORT; ++ } ++ if args.flag_numstat { ++ format |= git2::DiffStatsFormat::NUMBER; ++ } ++ if args.flag_summary { ++ format |= git2::DiffStatsFormat::INCLUDE_SUMMARY; ++ } ++ let buf = try!(stats.to_buf(format, 80)); ++ print!("{}", str::from_utf8(&*buf).unwrap()); ++ Ok(()) ++} ++ ++fn tree_to_treeish<'a>(repo: &'a Repository, arg: Option<&String>) ++ -> Result>, Error> { ++ let arg = match arg { Some(s) => s, None => return Ok(None) }; ++ let obj = try!(repo.revparse_single(arg)); ++ let tree = try!(obj.peel(ObjectType::Tree)); ++ Ok(Some(tree)) ++} ++ ++impl Args { ++ fn cache(&self) -> Cache { ++ if self.flag_cached {Cache::Only} ++ else if self.flag_nocached {Cache::None} ++ else {Cache::Normal} ++ } ++ fn color(&self) -> bool { self.flag_color && !self.flag_no_color } ++ fn diff_format(&self) -> DiffFormat { ++ if self.flag_patch {DiffFormat::Patch} ++ else if self.flag_name_only {DiffFormat::NameOnly} ++ else if self.flag_name_status {DiffFormat::NameStatus} ++ else if 
self.flag_raw {DiffFormat::Raw}
++        else {
++            match self.flag_format.as_ref().map(|s| &s[..]) {
++                Some("name") => DiffFormat::NameOnly,
++                Some("name-status") => DiffFormat::NameStatus,
++                Some("raw") => DiffFormat::Raw,
++                Some("diff-index") => DiffFormat::Raw,
++                _ => DiffFormat::Patch,
++            }
++        }
++    }
++}
++
++fn main() {
++    const USAGE: &'static str = "
++usage: diff [options] [<from-oid> [<to-oid>]]
++
++Options:
++    -p, --patch                show output in patch format
++    --cached                   use staged changes as diff
++    --nocached                 do not use staged changes
++    --name-only                show only names of changed files
++    --name-status              show only names and status changes
++    --raw                      generate the raw format
++    --format=<format>          specify format for stat summary
++    --color                    use color output
++    --no-color                 never use color output
++    -R                         swap two inputs
++    -a, --text                 treat all files as text
++    --ignore-space-at-eol      ignore changes in whitespace at EOL
++    -b, --ignore-space-change  ignore changes in amount of whitespace
++    -w, --ignore-all-space     ignore whitespace when comparing lines
++    --ignored                  show ignored files as well
++    --untracked                show untracked files
++    --patience                 generate diff using the patience algorithm
++    --minimal                  spend extra time to find smallest diff
++    --stat                     generate a diffstat
++    --numstat                  similar to --stat, but more machine friendly
++    --shortstat                only output last line of --stat
++    --summary                  output condensed summary of header info
++    -M, --find-renames <n>     set threshold for finding renames (default 50)
++    -C, --find-copies <n>      set threshold for finding copies (default 50)
++    --find-copies-harder       inspect unmodified files for sources of copies
++    -B, --break-rewrites       break complete rewrite changes into pairs
++    -U, --unified <n>          lines of context to show
++    --inter-hunk-context <n>   maximum lines of change between hunks
++    --abbrev <n>               length to abbreviate commits to
++    --src-prefix <s>           show given source prefix instead of 'a/'
++    --dst-prefix <s>           show given destination prefix instead of 'b/'
++    --git-dir <dir>            path to git repository to use
++    -h, --help                 show this message
++";
++
++    let args = Docopt::new(USAGE).and_then(|d| d.deserialize())
++                            .unwrap_or_else(|e| e.exit());
++    match run(&args) {
++        Ok(()) => {}
++        Err(e) => println!("error: {}", e),
++    }
++}
diff --cc vendor/git2-0.7.5/examples/fetch.rs
index 000000000,000000000..c667d7b63
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/examples/fetch.rs
@@@ -1,0 -1,0 +1,128 @@@
++/*
++ * libgit2 "fetch" example - shows how to fetch remote data
++ *
++ * Written by the libgit2 contributors
++ *
++ * To the extent possible under law, the author(s) have dedicated all copyright
++ * and related and neighboring rights to this software to the public domain
++ * worldwide. This software is distributed without any warranty.
++ *
++ * You should have received a copy of the CC0 Public Domain Dedication along
++ * with this software. If not, see
++ * <http://creativecommons.org/publicdomain/zero/1.0/>.
++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use docopt::Docopt; ++use git2::{Repository, RemoteCallbacks, AutotagOption, FetchOptions}; ++use std::io::{self, Write}; ++use std::str; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_remote: Option, ++} ++ ++fn run(args: &Args) -> Result<(), git2::Error> { ++ let repo = try!(Repository::open(".")); ++ let remote = args.arg_remote.as_ref().map(|s| &s[..]).unwrap_or("origin"); ++ ++ // Figure out whether it's a named remote or a URL ++ println!("Fetching {} for repo", remote); ++ let mut cb = RemoteCallbacks::new(); ++ let mut remote = try!(repo.find_remote(remote).or_else(|_| { ++ repo.remote_anonymous(remote) ++ })); ++ cb.sideband_progress(|data| { ++ print!("remote: {}", str::from_utf8(data).unwrap()); ++ io::stdout().flush().unwrap(); ++ true ++ }); ++ ++ // This callback gets called for each remote-tracking branch that gets ++ // updated. The message we output depends on whether it's a new one or an ++ // update. ++ cb.update_tips(|refname, a, b| { ++ if a.is_zero() { ++ println!("[new] {:20} {}", b, refname); ++ } else { ++ println!("[updated] {:10}..{:10} {}", a, b, refname); ++ } ++ true ++ }); ++ ++ // Here we show processed and total objects in the pack and the amount of ++ // received data. Most frontends will probably want to show a percentage and ++ // the download rate. ++ cb.transfer_progress(|stats| { ++ if stats.received_objects() == stats.total_objects() { ++ print!("Resolving deltas {}/{}\r", stats.indexed_deltas(), ++ stats.total_deltas()); ++ } else if stats.total_objects() > 0 { ++ print!("Received {}/{} objects ({}) in {} bytes\r", ++ stats.received_objects(), ++ stats.total_objects(), ++ stats.indexed_objects(), ++ stats.received_bytes()); ++ } ++ io::stdout().flush().unwrap(); ++ true ++ }); ++ ++ // Download the packfile and index it. This function updates the amount of ++ // received data and the indexer stats which lets you inform the user about ++ // progress. ++ let mut fo = FetchOptions::new(); ++ fo.remote_callbacks(cb); ++ try!(remote.download(&[], Some(&mut fo))); ++ ++ { ++ // If there are local objects (we got a thin pack), then tell the user ++ // how many objects we saved from having to cross the network. ++ let stats = remote.stats(); ++ if stats.local_objects() > 0 { ++ println!("\rReceived {}/{} objects in {} bytes (used {} local \ ++ objects)", stats.indexed_objects(), ++ stats.total_objects(), stats.received_bytes(), ++ stats.local_objects()); ++ } else { ++ println!("\rReceived {}/{} objects in {} bytes", ++ stats.indexed_objects(), stats.total_objects(), ++ stats.received_bytes()); ++ } ++ } ++ ++ // Disconnect the underlying connection to prevent from idling. ++ remote.disconnect(); ++ ++ // Update the references in the remote's namespace to point to the right ++ // commits. This may be needed even if there was no packfile to download, ++ // which can happen e.g. when the branches have been changed but all the ++ // needed objects are available locally. 
++ try!(remote.update_tips(None, true, ++ AutotagOption::Unspecified, None)); ++ ++ Ok(()) ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: fetch [options] [] ++ ++Options: ++ -h, --help show this message ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/init.rs index 000000000,000000000..0f6b83809 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/init.rs @@@ -1,0 -1,0 +1,152 @@@ ++/* ++ * libgit2 "init" example - shows how to initialize a new repo ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use docopt::Docopt; ++use git2::{Repository, RepositoryInitOptions, RepositoryInitMode, Error}; ++use std::path::{PathBuf, Path}; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_directory: String, ++ flag_quiet: bool, ++ flag_bare: bool, ++ flag_template: Option, ++ flag_separate_git_dir: Option, ++ flag_initial_commit: bool, ++ flag_shared: Option, ++} ++ ++fn run(args: &Args) -> Result<(), Error> { ++ let mut path = PathBuf::from(&args.arg_directory); ++ let repo = if !args.flag_bare && args.flag_template.is_none() && ++ args.flag_shared.is_none() && ++ args.flag_separate_git_dir.is_none() { ++ try!(Repository::init(&path)) ++ } else { ++ let mut opts = RepositoryInitOptions::new(); ++ opts.bare(args.flag_bare); ++ if let Some(ref s) = args.flag_template { ++ opts.template_path(Path::new(s)); ++ } ++ ++ // If you specified a separate git directory, then initialize ++ // the repository at that path and use the second path as the ++ // working directory of the repository (with a git-link file) ++ if let Some(ref s) = args.flag_separate_git_dir { ++ opts.workdir_path(&path); ++ path = PathBuf::from(s); ++ } ++ ++ if let Some(ref s) = args.flag_shared { ++ opts.mode(try!(parse_shared(s))); ++ } ++ try!(Repository::init_opts(&path, &opts)) ++ }; ++ ++ // Print a message to stdout like "git init" does ++ if !args.flag_quiet { ++ if args.flag_bare || args.flag_separate_git_dir.is_some() { ++ path = repo.path().to_path_buf(); ++ } else { ++ path = repo.workdir().unwrap().to_path_buf(); ++ } ++ println!("Initialized empty Git repository in {}", path.display()); ++ } ++ ++ if args.flag_initial_commit { ++ try!(create_initial_commit(&repo)); ++ println!("Created empty initial commit"); ++ } ++ ++ Ok(()) ++} ++ ++/// Unlike regular "git init", this example shows how to create an initial empty ++/// commit in the repository. This is the helper function that does that. ++fn create_initial_commit(repo: &Repository) -> Result<(), Error> { ++ // First use the config to initialize a commit signature for the user. ++ let sig = try!(repo.signature()); ++ ++ // Now let's create an empty tree for this commit ++ let tree_id = { ++ let mut index = try!(repo.index()); ++ ++ // Outside of this example, you could call index.add_path() ++ // here to put actual files into the index. For our purposes, we'll ++ // leave it empty for now. 
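++ //
++ // (As a point of reference, writing an empty index yields the well-known
++ // "empty tree" id 4b825dc642cb6eb9a060e54bf8d69288fbee4904, which the
++ // initial commit below will therefore point at.)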
++ ++ try!(index.write_tree()) ++ }; ++ ++ let tree = try!(repo.find_tree(tree_id)); ++ ++ // Ready to create the initial commit. ++ // ++ // Normally creating a commit would involve looking up the current HEAD ++ // commit and making that be the parent of the initial commit, but here this ++ // is the first commit so there will be no parent. ++ try!(repo.commit(Some("HEAD"), &sig, &sig, "Initial commit", &tree, &[])); ++ ++ Ok(()) ++} ++ ++fn parse_shared(shared: &str) -> Result { ++ match shared { ++ "false" | "umask" => Ok(git2::RepositoryInitMode::SHARED_UMASK), ++ "true" | "group" => Ok(git2::RepositoryInitMode::SHARED_GROUP), ++ "all" | "world" => Ok(git2::RepositoryInitMode::SHARED_ALL), ++ _ => { ++ if shared.starts_with('0') { ++ match u32::from_str_radix(&shared[1..], 8).ok() { ++ Some(n) => { ++ Ok(RepositoryInitMode::from_bits_truncate(n)) ++ } ++ None => { ++ Err(Error::from_str("invalid octal value for --shared")) ++ } ++ } ++ } else { ++ Err(Error::from_str("unknown value for --shared")) ++ } ++ } ++ } ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: init [options] ++ ++Options: ++ -q, --quiet don't print information to stdout ++ --bare initialize a new bare repository ++ --template use as an initialization template ++ --separate-git-dir use as the .git directory ++ --initial-commit create an initial empty commit ++ --shared permissions to create the repository with ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/log.rs index 000000000,000000000..70e8145da new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/log.rs @@@ -1,0 -1,0 +1,263 @@@ ++/* ++ * libgit2 "log" example - shows how to walk history and get commit info ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . 
++ */ ++ ++#![deny(warnings)] ++ ++#[macro_use] ++extern crate serde_derive; ++extern crate docopt; ++extern crate git2; ++extern crate time; ++ ++use std::str; ++use docopt::Docopt; ++use git2::{Repository, Signature, Commit, ObjectType, Time, DiffOptions}; ++use git2::{Pathspec, Error, DiffFormat}; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_commit: Vec, ++ arg_spec: Vec, ++ flag_topo_order: bool, ++ flag_date_order: bool, ++ flag_reverse: bool, ++ flag_author: Option, ++ flag_committer: Option, ++ flag_grep: Option, ++ flag_git_dir: Option, ++ flag_skip: Option, ++ flag_max_count: Option, ++ flag_merges: bool, ++ flag_no_merges: bool, ++ flag_no_min_parents: bool, ++ flag_no_max_parents: bool, ++ flag_max_parents: Option, ++ flag_min_parents: Option, ++ flag_patch: bool, ++} ++ ++fn run(args: &Args) -> Result<(), Error> { ++ let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); ++ let repo = try!(Repository::open(path)); ++ let mut revwalk = try!(repo.revwalk()); ++ ++ // Prepare the revwalk based on CLI parameters ++ let base = if args.flag_reverse {git2::Sort::REVERSE} else {git2::Sort::NONE}; ++ revwalk.set_sorting(base | if args.flag_topo_order { ++ git2::Sort::TOPOLOGICAL ++ } else if args.flag_date_order { ++ git2::Sort::TIME ++ } else { ++ git2::Sort::NONE ++ }); ++ for commit in &args.arg_commit { ++ if commit.starts_with('^') { ++ let obj = try!(repo.revparse_single(&commit[1..])); ++ try!(revwalk.hide(obj.id())); ++ continue ++ } ++ let revspec = try!(repo.revparse(commit)); ++ if revspec.mode().contains(git2::RevparseMode::SINGLE) { ++ try!(revwalk.push(revspec.from().unwrap().id())); ++ } else { ++ let from = revspec.from().unwrap().id(); ++ let to = revspec.to().unwrap().id(); ++ try!(revwalk.push(to)); ++ if revspec.mode().contains(git2::RevparseMode::MERGE_BASE) { ++ let base = try!(repo.merge_base(from, to)); ++ let o = try!(repo.find_object(base, Some(ObjectType::Commit))); ++ try!(revwalk.push(o.id())); ++ } ++ try!(revwalk.hide(from)); ++ } ++ } ++ if args.arg_commit.is_empty() { ++ try!(revwalk.push_head()); ++ } ++ ++ // Prepare our diff options and pathspec matcher ++ let (mut diffopts, mut diffopts2) = (DiffOptions::new(), DiffOptions::new()); ++ for spec in &args.arg_spec { ++ diffopts.pathspec(spec); ++ diffopts2.pathspec(spec); ++ } ++ let ps = try!(Pathspec::new(args.arg_spec.iter())); ++ ++ // Filter our revwalk based on the CLI parameters ++ macro_rules! 
filter_try { ++ ($e:expr) => (match $e { Ok(t) => t, Err(e) => return Some(Err(e)) }) ++ } ++ let revwalk = revwalk.filter_map(|id| { ++ let id = filter_try!(id); ++ let commit = filter_try!(repo.find_commit(id)); ++ let parents = commit.parents().len(); ++ if parents < args.min_parents() { return None } ++ if let Some(n) = args.max_parents() { ++ if parents >= n { return None } ++ } ++ if !args.arg_spec.is_empty() { ++ match commit.parents().len() { ++ 0 => { ++ let tree = filter_try!(commit.tree()); ++ let flags = git2::PathspecFlags::NO_MATCH_ERROR; ++ if ps.match_tree(&tree, flags).is_err() { return None } ++ } ++ _ => { ++ let m = commit.parents().all(|parent| { ++ match_with_parent(&repo, &commit, &parent, &mut diffopts) ++ .unwrap_or(false) ++ }); ++ if !m { return None } ++ } ++ } ++ } ++ if !sig_matches(&commit.author(), &args.flag_author) { return None } ++ if !sig_matches(&commit.committer(), &args.flag_committer) { return None } ++ if !log_message_matches(commit.message(), &args.flag_grep) { return None } ++ Some(Ok(commit)) ++ }).skip(args.flag_skip.unwrap_or(0)).take(args.flag_max_count.unwrap_or(!0)); ++ ++ // print! ++ for commit in revwalk { ++ let commit = try!(commit); ++ print_commit(&commit); ++ if !args.flag_patch || commit.parents().len() > 1 { continue } ++ let a = if commit.parents().len() == 1 { ++ let parent = try!(commit.parent(0)); ++ Some(try!(parent.tree())) ++ } else { ++ None ++ }; ++ let b = try!(commit.tree()); ++ let diff = try!(repo.diff_tree_to_tree(a.as_ref(), Some(&b), ++ Some(&mut diffopts2))); ++ try!(diff.print(DiffFormat::Patch, |_delta, _hunk, line| { ++ match line.origin() { ++ ' ' | '+' | '-' => print!("{}", line.origin()), ++ _ => {} ++ } ++ print!("{}", str::from_utf8(line.content()).unwrap()); ++ true ++ })); ++ } ++ ++ Ok(()) ++} ++ ++fn sig_matches(sig: &Signature, arg: &Option) -> bool { ++ match *arg { ++ Some(ref s) => { ++ sig.name().map(|n| n.contains(s)).unwrap_or(false) || ++ sig.email().map(|n| n.contains(s)).unwrap_or(false) ++ } ++ None => true ++ } ++} ++ ++fn log_message_matches(msg: Option<&str>, grep: &Option) -> bool { ++ match (grep, msg) { ++ (&None, _) => true, ++ (&Some(_), None) => false, ++ (&Some(ref s), Some(msg)) => msg.contains(s), ++ } ++} ++ ++fn print_commit(commit: &Commit) { ++ println!("commit {}", commit.id()); ++ ++ if commit.parents().len() > 1 { ++ print!("Merge:"); ++ for id in commit.parent_ids() { ++ print!(" {:.8}", id); ++ } ++ println!(""); ++ } ++ ++ let author = commit.author(); ++ println!("Author: {}", author); ++ print_time(&author.when(), "Date: "); ++ println!(""); ++ ++ for line in String::from_utf8_lossy(commit.message_bytes()).lines() { ++ println!(" {}", line); ++ } ++ println!(""); ++} ++ ++fn print_time(time: &Time, prefix: &str) { ++ let (offset, sign) = match time.offset_minutes() { ++ n if n < 0 => (-n, '-'), ++ n => (n, '+'), ++ }; ++ let (hours, minutes) = (offset / 60, offset % 60); ++ let ts = time::Timespec::new(time.seconds() + ++ (time.offset_minutes() as i64) * 60, 0); ++ let time = time::at(ts); ++ ++ println!("{}{} {}{:02}{:02}", prefix, ++ time.strftime("%a %b %e %T %Y").unwrap(), sign, hours, minutes); ++ ++} ++ ++fn match_with_parent(repo: &Repository, commit: &Commit, parent: &Commit, ++ opts: &mut DiffOptions) -> Result { ++ let a = try!(parent.tree()); ++ let b = try!(commit.tree()); ++ let diff = try!(repo.diff_tree_to_tree(Some(&a), Some(&b), Some(opts))); ++ Ok(diff.deltas().len() > 0) ++} ++ ++impl Args { ++ fn min_parents(&self) -> usize { ++ if 
self.flag_no_min_parents { return 0 } ++ self.flag_min_parents.unwrap_or(if self.flag_merges {2} else {0}) ++ } ++ ++ fn max_parents(&self) -> Option { ++ if self.flag_no_max_parents { return None } ++ self.flag_max_parents.or(if self.flag_no_merges {Some(1)} else {None}) ++ } ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: log [options] [..] [--] [..] ++ ++Options: ++ --topo-order sort commits in topological order ++ --date-order sort commits in date order ++ --reverse sort commits in reverse ++ --author author to sort by ++ --committer committer to sort by ++ --grep pattern to filter commit messages by ++ --git-dir alternative git directory to use ++ --skip number of commits to skip ++ -n, --max-count maximum number of commits to show ++ --merges only show merge commits ++ --no-merges don't show merge commits ++ --no-min-parents don't require a minimum number of parents ++ --no-max-parents don't require a maximum number of parents ++ --max-parents specify a maximum number of parents for a commit ++ --min-parents specify a minimum number of parents for a commit ++ -p, --patch show commit diff ++ -h, --help show this message ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/ls-remote.rs index 000000000,000000000..5da9575f7 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/ls-remote.rs @@@ -1,0 -1,0 +1,63 @@@ ++/* ++ * libgit2 "ls-remote" example ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use docopt::Docopt; ++use git2::{Repository, Direction}; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_remote: String, ++} ++ ++fn run(args: &Args) -> Result<(), git2::Error> { ++ let repo = try!(Repository::open(".")); ++ let remote = &args.arg_remote; ++ let mut remote = try!(repo.find_remote(remote).or_else(|_| { ++ repo.remote_anonymous(remote) ++ })); ++ ++ // Connect to the remote and call the printing function for each of the ++ // remote references. ++ let connection = try!(remote.connect_auth(Direction::Fetch, None, None)); ++ ++ // Get the list of references on the remote and print out their name next to ++ // what they point to. 
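++ //
++ // Each line comes out in roughly `git ls-remote` form, e.g.
++ // 7fd1a60b01f91b314f59955a4e4d4e80d8edf11d  refs/heads/master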
++ for head in try!(connection.list()).iter() { ++ println!("{}\t{}", head.oid(), head.name()); ++ } ++ Ok(()) ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: ls-remote [option] ++ ++Options: ++ -h, --help show this message ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/rev-list.rs index 000000000,000000000..db9bd82ca new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/rev-list.rs @@@ -1,0 -1,0 +1,97 @@@ ++/* ++ * libgit2 "rev-list" example - shows how to transform a rev-spec into a list ++ * of commit ids ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use docopt::Docopt; ++use git2::{Repository, Error, Revwalk, Oid}; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_spec: Vec, ++ flag_topo_order: bool, ++ flag_date_order: bool, ++ flag_reverse: bool, ++ flag_not: Vec, ++} ++ ++fn run(args: &Args) -> Result<(), git2::Error> { ++ let repo = try!(Repository::open(".")); ++ let mut revwalk = try!(repo.revwalk()); ++ ++ let base = if args.flag_reverse {git2::Sort::REVERSE} else {git2::Sort::NONE}; ++ revwalk.set_sorting(base | if args.flag_topo_order { ++ git2::Sort::TOPOLOGICAL ++ } else if args.flag_date_order { ++ git2::Sort::TIME ++ } else { ++ git2::Sort::NONE ++ }); ++ ++ let specs = args.flag_not.iter().map(|s| (s, true)) ++ .chain(args.arg_spec.iter().map(|s| (s, false))) ++ .map(|(spec, hide)| { ++ if spec.starts_with('^') {(&spec[1..], !hide)} else {(&spec[..], hide)} ++ }); ++ for (spec, hide) in specs { ++ let id = if spec.contains("..") { ++ let revspec = try!(repo.revparse(spec)); ++ if revspec.mode().contains(git2::RevparseMode::MERGE_BASE) { ++ return Err(Error::from_str("merge bases not implemented")) ++ } ++ try!(push(&mut revwalk, revspec.from().unwrap().id(), !hide)); ++ revspec.to().unwrap().id() ++ } else { ++ try!(repo.revparse_single(spec)).id() ++ }; ++ try!(push(&mut revwalk, id, hide)); ++ } ++ ++ for id in revwalk { ++ let id = try!(id); ++ println!("{}", id); ++ } ++ Ok(()) ++} ++ ++fn push(revwalk: &mut Revwalk, id: Oid, hide: bool) -> Result<(), Error> { ++ if hide {revwalk.hide(id)} else {revwalk.push(id)} ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: rev-list [options] [--] ... 
++ ++Options: ++ --topo-order sort commits in topological order ++ --date-order sort commits in date order ++ --reverse sort commits in reverse ++ --not don't show ++ -h, --help show this message ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} ++ diff --cc vendor/git2-0.7.5/examples/rev-parse.rs index 000000000,000000000..f2416f7b4 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/rev-parse.rs @@@ -1,0 -1,0 +1,70 @@@ ++/* ++ * libgit2 "rev-parse" example - shows how to parse revspecs ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use docopt::Docopt; ++use git2::Repository; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_spec: String, ++ flag_git_dir: Option, ++} ++ ++fn run(args: &Args) -> Result<(), git2::Error> { ++ let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); ++ let repo = try!(Repository::open(path)); ++ ++ let revspec = try!(repo.revparse(&args.arg_spec)); ++ ++ if revspec.mode().contains(git2::RevparseMode::SINGLE) { ++ println!("{}", revspec.from().unwrap().id()); ++ } else if revspec.mode().contains(git2::RevparseMode::RANGE) { ++ let to = revspec.to().unwrap(); ++ let from = revspec.from().unwrap(); ++ println!("{}", to.id()); ++ ++ if revspec.mode().contains(git2::RevparseMode::MERGE_BASE) { ++ let base = try!(repo.merge_base(from.id(), to.id())); ++ println!("{}", base); ++ } ++ ++ println!("^{}", from.id()); ++ } else { ++ return Err(git2::Error::from_str("invalid results from revparse")) ++ } ++ Ok(()) ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: rev-parse [options] ++ ++Options: ++ --git-dir directory for the git repository to check ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/status.rs index 000000000,000000000..87ca6ec31 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/status.rs @@@ -1,0 -1,0 +1,369 @@@ ++/* ++ * libgit2 "status" example - shows how to use the status APIs ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . 
++ */
++
++#![deny(warnings)]
++
++extern crate git2;
++extern crate docopt;
++#[macro_use]
++extern crate serde_derive;
++
++use std::str;
++use std::time::Duration;
++use docopt::Docopt;
++use git2::{Repository, Error, StatusOptions, ErrorCode, SubmoduleIgnore};
++
++#[derive(Deserialize)]
++struct Args {
++ arg_spec: Vec<String>,
++ flag_short: bool,
++ flag_porcelain: bool,
++ flag_branch: bool,
++ flag_z: bool,
++ flag_ignored: bool,
++ flag_untracked_files: Option<String>,
++ flag_ignore_submodules: Option<String>,
++ flag_git_dir: Option<String>,
++ flag_repeat: bool,
++ flag_list_submodules: bool,
++}
++
++#[derive(Eq, PartialEq)]
++enum Format { Long, Short, Porcelain }
++
++fn run(args: &Args) -> Result<(), Error> {
++ let path = args.flag_git_dir.clone().unwrap_or_else(|| ".".to_string());
++ let repo = try!(Repository::open(&path));
++ if repo.is_bare() {
++ return Err(Error::from_str("cannot report status on bare repository"))
++ }
++
++ let mut opts = StatusOptions::new();
++ opts.include_ignored(args.flag_ignored);
++ match args.flag_untracked_files.as_ref().map(|s| &s[..]) {
++ Some("no") => { opts.include_untracked(false); }
++ Some("normal") => { opts.include_untracked(true); }
++ Some("all") => {
++ opts.include_untracked(true).recurse_untracked_dirs(true);
++ }
++ Some(_) => return Err(Error::from_str("invalid untracked-files value")),
++ None => {}
++ }
++ match args.flag_ignore_submodules.as_ref().map(|s| &s[..]) {
++ Some("all") => { opts.exclude_submodules(true); }
++ Some(_) => return Err(Error::from_str("invalid ignore-submodules value")),
++ None => {}
++ }
++ opts.include_untracked(!args.flag_ignored);
++ for spec in &args.arg_spec {
++ opts.pathspec(spec);
++ }
++
++ loop {
++ if args.flag_repeat {
++ println!("\u{1b}[H\u{1b}[2J");
++ }
++
++ let statuses = try!(repo.statuses(Some(&mut opts)));
++
++ if args.flag_branch {
++ try!(show_branch(&repo, &args.format()));
++ }
++ if args.flag_list_submodules {
++ try!(print_submodules(&repo));
++ }
++
++ if args.format() == Format::Long {
++ print_long(&statuses);
++ } else {
++ print_short(&repo, &statuses);
++ }
++
++ if args.flag_repeat {
++ std::thread::sleep(Duration::new(10, 0));
++ } else {
++ return Ok(())
++ }
++ }
++}
++
++fn show_branch(repo: &Repository, format: &Format) -> Result<(), Error> {
++ let head = match repo.head() {
++ Ok(head) => Some(head),
++ Err(ref e) if e.code() == ErrorCode::UnbornBranch ||
++ e.code() == ErrorCode::NotFound => None,
++ Err(e) => return Err(e),
++ };
++ let head = head.as_ref().and_then(|h| h.shorthand());
++
++ if format == &Format::Long {
++ println!("# On branch {}",
++ head.unwrap_or("Not currently on any branch"));
++ } else {
++ println!("## {}", head.unwrap_or("HEAD (no branch)"));
++ }
++ Ok(())
++}
++
++fn print_submodules(repo: &Repository) -> Result<(), Error> {
++ let modules = try!(repo.submodules());
++ println!("# Submodules");
++ for sm in &modules {
++ println!("# - submodule '{}' at {}", sm.name().unwrap(),
++ sm.path().display());
++ }
++ Ok(())
++}
++
++// This function prints output similar to git's status command in long
++// form, including the command-line hints.
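++// It makes successive passes over the status list: changes staged in the
++// index, then workdir changes to tracked files, then untracked files, and
++// finally ignored files.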
++fn print_long(statuses: &git2::Statuses) { ++ let mut header = false; ++ let mut rm_in_workdir = false; ++ let mut changes_in_index = false; ++ let mut changed_in_workdir = false; ++ ++ // Print index changes ++ for entry in statuses.iter().filter(|e| e.status() != git2::Status::CURRENT) { ++ if entry.status().contains(git2::Status::WT_DELETED) { ++ rm_in_workdir = true; ++ } ++ let istatus = match entry.status() { ++ s if s.contains(git2::Status::INDEX_NEW) => "new file: ", ++ s if s.contains(git2::Status::INDEX_MODIFIED) => "modified: ", ++ s if s.contains(git2::Status::INDEX_DELETED) => "deleted: ", ++ s if s.contains(git2::Status::INDEX_RENAMED) => "renamed: ", ++ s if s.contains(git2::Status::INDEX_TYPECHANGE) => "typechange:", ++ _ => continue, ++ }; ++ if !header { ++ println!("\ ++# Changes to be committed: ++# (use \"git reset HEAD ...\" to unstage) ++#"); ++ header = true; ++ } ++ ++ let old_path = entry.head_to_index().unwrap().old_file().path(); ++ let new_path = entry.head_to_index().unwrap().new_file().path(); ++ match (old_path, new_path) { ++ (Some(old), Some(new)) if old != new => { ++ println!("#\t{} {} -> {}", istatus, old.display(), ++ new.display()); ++ } ++ (old, new) => { ++ println!("#\t{} {}", istatus, old.or(new).unwrap().display()); ++ } ++ } ++ } ++ ++ if header { ++ changes_in_index = true; ++ println!("#"); ++ } ++ header = false; ++ ++ // Print workdir changes to tracked files ++ for entry in statuses.iter() { ++ // With `Status::OPT_INCLUDE_UNMODIFIED` (not used in this example) ++ // `index_to_workdir` may not be `None` even if there are no differences, ++ // in which case it will be a `Delta::Unmodified`. ++ if entry.status() == git2::Status::CURRENT || ++ entry.index_to_workdir().is_none() { ++ continue ++ } ++ ++ let istatus = match entry.status() { ++ s if s.contains(git2::Status::WT_MODIFIED) => "modified: ", ++ s if s.contains(git2::Status::WT_DELETED) => "deleted: ", ++ s if s.contains(git2::Status::WT_RENAMED) => "renamed: ", ++ s if s.contains(git2::Status::WT_TYPECHANGE) => "typechange:", ++ _ => continue, ++ }; ++ ++ if !header { ++ println!("\ ++# Changes not staged for commit: ++# (use \"git add{} ...\" to update what will be committed) ++# (use \"git checkout -- ...\" to discard changes in working directory) ++#\ ++ ", if rm_in_workdir {"/rm"} else {""}); ++ header = true; ++ } ++ ++ let old_path = entry.index_to_workdir().unwrap().old_file().path(); ++ let new_path = entry.index_to_workdir().unwrap().new_file().path(); ++ match (old_path, new_path) { ++ (Some(old), Some(new)) if old != new => { ++ println!("#\t{} {} -> {}", istatus, old.display(), ++ new.display()); ++ } ++ (old, new) => { ++ println!("#\t{} {}", istatus, old.or(new).unwrap().display()); ++ } ++ } ++ } ++ ++ if header { ++ changed_in_workdir = true; ++ println!("#"); ++ } ++ header = false; ++ ++ // Print untracked files ++ for entry in statuses.iter().filter(|e| e.status() == git2::Status::WT_NEW) { ++ if !header { ++ println!("\ ++# Untracked files ++# (use \"git add ...\" to include in what will be committed) ++#"); ++ header = true; ++ } ++ let file = entry.index_to_workdir().unwrap().old_file().path().unwrap(); ++ println!("#\t{}", file.display()); ++ } ++ header = false; ++ ++ // Print ignored files ++ for entry in statuses.iter().filter(|e| e.status() == git2::Status::IGNORED) { ++ if !header { ++ println!("\ ++# Ignored files ++# (use \"git add -f ...\" to include in what will be committed) ++#"); ++ header = true; ++ } ++ let file = 
entry.index_to_workdir().unwrap().old_file().path().unwrap(); ++ println!("#\t{}", file.display()); ++ } ++ ++ if !changes_in_index && changed_in_workdir { ++ println!("no changes added to commit (use \"git add\" and/or \ ++ \"git commit -a\")"); ++ } ++} ++ ++// This version of the output prefixes each path with two status columns and ++// shows submodule status information. ++fn print_short(repo: &Repository, statuses: &git2::Statuses) { ++ for entry in statuses.iter().filter(|e| e.status() != git2::Status::CURRENT) { ++ let mut istatus = match entry.status() { ++ s if s.contains(git2::Status::INDEX_NEW) => 'A', ++ s if s.contains(git2::Status::INDEX_MODIFIED) => 'M', ++ s if s.contains(git2::Status::INDEX_DELETED) => 'D', ++ s if s.contains(git2::Status::INDEX_RENAMED) => 'R', ++ s if s.contains(git2::Status::INDEX_TYPECHANGE) => 'T', ++ _ => ' ', ++ }; ++ let mut wstatus = match entry.status() { ++ s if s.contains(git2::Status::WT_NEW) => { ++ if istatus == ' ' { istatus = '?'; } '?' ++ } ++ s if s.contains(git2::Status::WT_MODIFIED) => 'M', ++ s if s.contains(git2::Status::WT_DELETED) => 'D', ++ s if s.contains(git2::Status::WT_RENAMED) => 'R', ++ s if s.contains(git2::Status::WT_TYPECHANGE) => 'T', ++ _ => ' ', ++ }; ++ ++ if entry.status().contains(git2::Status::IGNORED) { ++ istatus = '!'; ++ wstatus = '!'; ++ } ++ if istatus == '?' && wstatus == '?' { continue } ++ let mut extra = ""; ++ ++ // A commit in a tree is how submodules are stored, so let's go take a ++ // look at its status. ++ // ++ // TODO: check for GIT_FILEMODE_COMMIT ++ let status = entry.index_to_workdir().and_then(|diff| { ++ let ignore = SubmoduleIgnore::Unspecified; ++ diff.new_file().path_bytes() ++ .and_then(|s| str::from_utf8(s).ok()) ++ .and_then(|name| repo.submodule_status(name, ignore).ok()) ++ }); ++ if let Some(status) = status { ++ if status.contains(git2::SubmoduleStatus::WD_MODIFIED) { ++ extra = " (new commits)"; ++ } else if status.contains(git2::SubmoduleStatus::WD_INDEX_MODIFIED) || status.contains(git2::SubmoduleStatus::WD_WD_MODIFIED) { ++ extra = " (modified content)"; ++ } else if status.contains(git2::SubmoduleStatus::WD_UNTRACKED) { ++ extra = " (untracked content)"; ++ } ++ } ++ ++ let (mut a, mut b, mut c) = (None, None, None); ++ if let Some(diff) = entry.head_to_index() { ++ a = diff.old_file().path(); ++ b = diff.new_file().path(); ++ } ++ if let Some(diff) = entry.index_to_workdir() { ++ a = a.or_else(|| diff.old_file().path()); ++ b = b.or_else(|| diff.old_file().path()); ++ c = diff.new_file().path(); ++ } ++ ++ match (istatus, wstatus) { ++ ('R', 'R') => println!("RR {} {} {}{}", a.unwrap().display(), ++ b.unwrap().display(), c.unwrap().display(), ++ extra), ++ ('R', w) => println!("R{} {} {}{}", w, a.unwrap().display(), ++ b.unwrap().display(), extra), ++ (i, 'R') => println!("{}R {} {}{}", i, a.unwrap().display(), ++ c.unwrap().display(), extra), ++ (i, w) => println!("{}{} {}{}", i, w, a.unwrap().display(), extra), ++ } ++ } ++ ++ for entry in statuses.iter().filter(|e| e.status() == git2::Status::WT_NEW) { ++ println!("?? {}", entry.index_to_workdir().unwrap().old_file() ++ .path().unwrap().display()); ++ } ++} ++ ++impl Args { ++ fn format(&self) -> Format { ++ if self.flag_short { Format::Short } ++ else if self.flag_porcelain || self.flag_z { Format::Porcelain } ++ else { Format::Long } ++ } ++} ++ ++fn main() { ++ const USAGE: &'static str = " ++usage: status [options] [--] [..] 
++ ++Options: ++ -s, --short show short statuses ++ --long show longer statuses (default) ++ --porcelain ?? ++ -b, --branch show branch information ++ -z ?? ++ --ignored show ignored files as well ++ --untracked-files setting for showing untracked files [no|normal|all] ++ --ignore-submodules setting for ignoring submodules [all] ++ --git-dir git directory to analyze ++ --repeat repeatedly show status, sleeping inbetween ++ --list-submodules show submodules ++ -h, --help show this message ++"; ++ ++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize()) ++ .unwrap_or_else(|e| e.exit()); ++ match run(&args) { ++ Ok(()) => {} ++ Err(e) => println!("error: {}", e), ++ } ++} diff --cc vendor/git2-0.7.5/examples/tag.rs index 000000000,000000000..f0ff3a219 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/examples/tag.rs @@@ -1,0 -1,0 +1,134 @@@ ++/* ++ * libgit2 "tag" example - shows how to list, create and delete tags ++ * ++ * Written by the libgit2 contributors ++ * ++ * To the extent possible under law, the author(s) have dedicated all copyright ++ * and related and neighboring rights to this software to the public domain ++ * worldwide. This software is distributed without any warranty. ++ * ++ * You should have received a copy of the CC0 Public Domain Dedication along ++ * with this software. If not, see ++ * . ++ */ ++ ++#![deny(warnings)] ++ ++extern crate git2; ++extern crate docopt; ++#[macro_use] ++extern crate serde_derive; ++ ++use std::str; ++use docopt::Docopt; ++use git2::{Repository, Error, Tag, Commit}; ++ ++#[derive(Deserialize)] ++struct Args { ++ arg_tagname: Option, ++ arg_object: Option, ++ arg_pattern: Option, ++ flag_n: Option, ++ flag_force: bool, ++ flag_list: bool, ++ flag_delete: Option, ++ flag_message: Option, ++} ++ ++fn run(args: &Args) -> Result<(), Error> { ++ let repo = try!(Repository::open(".")); ++ ++ if let Some(ref name) = args.arg_tagname { ++ let target = args.arg_object.as_ref().map(|s| &s[..]).unwrap_or("HEAD"); ++ let obj = try!(repo.revparse_single(target)); ++ ++ if let Some(ref message) = args.flag_message { ++ let sig = try!(repo.signature()); ++ try!(repo.tag(name, &obj, &sig, message, args.flag_force)); ++ } else { ++ try!(repo.tag_lightweight(name, &obj, args.flag_force)); ++ } ++ ++ } else if let Some(ref name) = args.flag_delete { ++ let obj = try!(repo.revparse_single(name)); ++ let id = try!(obj.short_id()); ++ try!(repo.tag_delete(name)); ++ println!("Deleted tag '{}' (was {})", name, ++ str::from_utf8(&*id).unwrap()); ++ ++ } else if args.flag_list { ++ let pattern = args.arg_pattern.as_ref().map(|s| &s[..]).unwrap_or("*"); ++ for name in try!(repo.tag_names(Some(pattern))).iter() { ++ let name = name.unwrap(); ++ let obj = try!(repo.revparse_single(name)); ++ ++ if let Some(tag) = obj.as_tag() { ++ print_tag(tag, args); ++ } else if let Some(commit) = obj.as_commit() { ++ print_commit(commit, name, args); ++ } else { ++ print_name(name); ++ } ++ } ++ } ++ Ok(()) ++} ++ ++fn print_tag(tag: &Tag, args: &Args) { ++ print!("{:<16}", tag.name().unwrap()); ++ if args.flag_n.is_some() { ++ print_list_lines(tag.message(), args); ++ } else { ++ println!(""); ++ } ++} ++ ++fn print_commit(commit: &Commit, name: &str, args: &Args) { ++ print!("{:<16}", name); ++ if args.flag_n.is_some() { ++ print_list_lines(commit.message(), args); ++ } else { ++ println!(""); ++ } ++} ++ ++fn print_name(name: &str) { ++ println!("{}", name); ++} ++ ++fn print_list_lines(message: Option<&str>, args: &Args) { ++ let message = match message { Some(s) 
=> s, None => return };
++ let mut lines = message.lines().filter(|l| !l.trim().is_empty());
++ if let Some(first) = lines.next() {
++ print!("{}", first);
++ }
++ println!("");
++
++ for line in lines.take(args.flag_n.unwrap_or(0) as usize) {
++ print!(" {}", line);
++ }
++}
++
++fn main() {
++ const USAGE: &'static str = "
++usage:
++ tag [-a] [-f] [-m <msg>] <tagname> [<object>]
++ tag -d <tag>
++ tag [-n <num>] -l [<pattern>]
++
++Options:
++ -n <num> specify number of lines from the annotation to print
++ -f, --force replace an existing tag with the given name
++ -l, --list list tags with names matching the pattern given
++ -d, --delete delete the tag specified
++ -m, --message <msg> message for a new tag
++ -h, --help show this message
++";
++
++ let args = Docopt::new(USAGE).and_then(|d| d.deserialize())
++ .unwrap_or_else(|e| e.exit());
++ match run(&args) {
++ Ok(()) => {}
++ Err(e) => println!("error: {}", e),
++ }
++}
diff --cc vendor/git2-0.7.5/src/blame.rs
index 000000000,000000000..9f6b15571
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/blame.rs
@@@ -1,0 -1,0 +1,315 @@@
++use std::marker;
++use {raw, Repository, Oid, signature, Signature};
++use util::{self, Binding};
++use std::path::Path;
++use std::ops::Range;
++use std::mem;
++
++/// Opaque structure to hold blame results.
++pub struct Blame<'repo> {
++ raw: *mut raw::git_blame,
++ _marker: marker::PhantomData<&'repo Repository>,
++}
++
++/// Structure that represents a blame hunk.
++pub struct BlameHunk<'blame> {
++ raw: *mut raw::git_blame_hunk,
++ _marker: marker::PhantomData<&'blame raw::git_blame>,
++}
++
++/// Blame options
++pub struct BlameOptions {
++ raw: raw::git_blame_options,
++}
++
++/// An iterator over the hunks in a blame.
++pub struct BlameIter<'blame> {
++ range: Range<usize>,
++ blame: &'blame Blame<'blame>,
++}
++
++impl<'repo> Blame<'repo> {
++
++ /// Gets the number of hunks that exist in the blame structure.
++ pub fn len(&self) -> usize {
++ unsafe { raw::git_blame_get_hunk_count(self.raw) as usize }
++ }
++
++ /// Returns `true` if there are no hunks in the blame structure.
++ pub fn is_empty(&self) -> bool {
++ self.len() == 0
++ }
++
++ /// Gets the blame hunk at the given index.
++ pub fn get_index(&self, index: usize) -> Option<BlameHunk> {
++ unsafe {
++ let ptr = raw::git_blame_get_hunk_byindex(self.raw(), index as u32);
++ if ptr.is_null() {
++ None
++ } else {
++ Some(BlameHunk::from_raw_const(ptr))
++ }
++ }
++ }
++
++ /// Gets the hunk that relates to the given line number in the newest
++ /// commit.
++ pub fn get_line(&self, lineno: usize) -> Option<BlameHunk> {
++ unsafe {
++ let ptr = raw::git_blame_get_hunk_byline(self.raw(), lineno);
++ if ptr.is_null() {
++ None
++ } else {
++ Some(BlameHunk::from_raw_const(ptr))
++ }
++ }
++ }
++
++ /// Returns an iterator over the hunks in this blame.
++ pub fn iter(&self) -> BlameIter {
++ BlameIter { range: 0..self.len(), blame: self }
++ }
++
++}
++
++impl<'blame> BlameHunk<'blame> {
++
++ unsafe fn from_raw_const(raw: *const raw::git_blame_hunk)
++ -> BlameHunk<'blame> {
++ BlameHunk {
++ raw: raw as *mut raw::git_blame_hunk,
++ _marker: marker::PhantomData,
++ }
++ }
++
++ /// Returns the OID of the commit where this line was last changed.
++ pub fn final_commit_id(&self) -> Oid {
++ unsafe { Oid::from_raw(&(*self.raw).final_commit_id) }
++ }
++
++ /// Returns the signature of the commit.
++ pub fn final_signature(&self) -> Signature {
++ unsafe { signature::from_raw_const(self, (*self.raw).final_signature) }
++ }
++
++ /// Returns the line number where this hunk begins.
++ ///
++ /// Note that the start line is counted from 1.
++ pub fn final_start_line(&self) -> usize {
++ unsafe { (*self.raw).final_start_line_number }
++ }
++
++ /// Returns the OID of the commit where this hunk was found.
++ ///
++ /// This will usually be the same as `final_commit_id`,
++ /// except when `BlameOptions::track_copies_any_commit_copies` has been
++ /// turned on.
++ pub fn orig_commit_id(&self) -> Oid {
++ unsafe { Oid::from_raw(&(*self.raw).orig_commit_id) }
++ }
++
++ /// Returns the signature of the commit.
++ pub fn orig_signature(&self) -> Signature {
++ unsafe { signature::from_raw_const(self, (*self.raw).orig_signature) }
++ }
++
++ /// Returns the line number where this hunk begins.
++ ///
++ /// Note that the start line is counted from 1.
++ pub fn orig_start_line(&self) -> usize {
++ unsafe { (*self.raw).orig_start_line_number}
++ }
++
++ /// Returns the path to the file where this hunk originated.
++ ///
++ /// Note: `None` could be returned for non-unicode paths on Windows.
++ pub fn path(&self) -> Option<&Path> {
++ unsafe {
++ if let Some(bytes) = ::opt_bytes(self, (*self.raw).orig_path) {
++ Some(util::bytes2path(bytes))
++ } else {
++ None
++ }
++ }
++ }
++
++ /// Tests whether this hunk has been tracked to a boundary commit
++ /// (the root, or the commit specified in git_blame_options.oldest_commit).
++ pub fn is_boundary(&self) -> bool {
++ unsafe { (*self.raw).boundary == 1 }
++ }
++
++ /// Returns the number of lines in this hunk.
++ pub fn lines_in_hunk(&self) -> usize {
++ unsafe { (*self.raw).lines_in_hunk as usize }
++ }
++}
++
++
++impl Default for BlameOptions {
++ fn default() -> Self {
++ Self::new()
++ }
++}
++
++impl BlameOptions {
++
++ /// Initialize options
++ pub fn new() -> BlameOptions {
++ unsafe {
++ let mut raw: raw::git_blame_options = mem::zeroed();
++ assert_eq!(
++ raw::git_blame_init_options(&mut raw,
++ raw::GIT_BLAME_OPTIONS_VERSION)
++ , 0);
++
++ Binding::from_raw(&raw as *const _ as *mut _)
++ }
++ }
++
++ fn flag(&mut self, opt: u32, val: bool) -> &mut BlameOptions {
++ if val {
++ self.raw.flags |= opt;
++ } else {
++ self.raw.flags &= !opt;
++ }
++ self
++ }
++
++ /// Track lines that have moved within a file.
++ pub fn track_copies_same_file(&mut self, opt: bool) -> &mut BlameOptions {
++ self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_FILE, opt)
++ }
++
++ /// Track lines that have moved across files in the same commit.
++ pub fn track_copies_same_commit_moves(&mut self, opt: bool) -> &mut BlameOptions {
++ self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_COMMIT_MOVES, opt)
++ }
++
++ /// Track lines that have been copied from another file that exists
++ /// in the same commit.
++ pub fn track_copies_same_commit_copies(&mut self, opt: bool) -> &mut BlameOptions {
++ self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_COMMIT_COPIES, opt)
++ }
++
++ /// Track lines that have been copied from another file that exists
++ /// in any commit.
++ pub fn track_copies_any_commit_copies(&mut self, opt: bool) -> &mut BlameOptions {
++ self.flag(raw::GIT_BLAME_TRACK_COPIES_ANY_COMMIT_COPIES, opt)
++ }
++
++ /// Restrict the search of commits to those reachable following only
++ /// the first parents.
++ pub fn first_parent(&mut self, opt: bool) -> &mut BlameOptions {
++ self.flag(raw::GIT_BLAME_FIRST_PARENT, opt)
++ }
++
++ /// Setter for the id of the newest commit to consider.
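++ ///
++ /// A hypothetical call chain (sketch only; `path` and `head_id` are
++ /// placeholder names):
++ /// `repo.blame_file(path, Some(BlameOptions::new().newest_commit(head_id)))`.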
++ pub fn newest_commit(&mut self, id: Oid) -> &mut BlameOptions { ++ unsafe { self.raw.newest_commit = *id.raw(); } ++ self ++ } ++ ++ /// Setter for the id of the oldest commit to consider. ++ pub fn oldest_commit(&mut self, id: Oid) -> &mut BlameOptions { ++ unsafe { self.raw.oldest_commit = *id.raw(); } ++ self ++ } ++ ++} ++ ++impl<'repo> Binding for Blame<'repo> { ++ type Raw = *mut raw::git_blame; ++ ++ unsafe fn from_raw(raw: *mut raw::git_blame) -> Blame<'repo> { ++ Blame { raw: raw, _marker: marker::PhantomData } ++ } ++ ++ fn raw(&self) -> *mut raw::git_blame { self.raw } ++} ++ ++impl<'repo> Drop for Blame<'repo> { ++ fn drop(&mut self) { ++ unsafe { raw::git_blame_free(self.raw) } ++ } ++} ++ ++impl<'blame> Binding for BlameHunk<'blame> { ++ type Raw = *mut raw::git_blame_hunk; ++ ++ unsafe fn from_raw(raw: *mut raw::git_blame_hunk) -> BlameHunk<'blame> { ++ BlameHunk { raw: raw, _marker: marker::PhantomData } ++ } ++ ++ fn raw(&self) -> *mut raw::git_blame_hunk { self.raw } ++} ++ ++impl Binding for BlameOptions { ++ type Raw = *mut raw::git_blame_options; ++ ++ unsafe fn from_raw(opts: *mut raw::git_blame_options) -> BlameOptions { ++ BlameOptions { raw: *opts } ++ } ++ ++ fn raw(&self) -> *mut raw::git_blame_options { ++ &self.raw as *const _ as *mut _ ++ } ++} ++ ++impl<'blame> Iterator for BlameIter<'blame> { ++ type Item = BlameHunk<'blame>; ++ fn next(&mut self) -> Option> { ++ self.range.next().and_then(|i| self.blame.get_index(i)) ++ } ++ ++ fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } ++} ++ ++impl<'blame> DoubleEndedIterator for BlameIter<'blame> { ++ fn next_back(&mut self) -> Option> { ++ self.range.next_back().and_then(|i| self.blame.get_index(i)) ++ } ++} ++ ++impl<'blame> ExactSizeIterator for BlameIter<'blame> {} ++ ++#[cfg(test)] ++mod tests { ++ use std::fs::{self, File}; ++ use std::path::Path; ++ ++ #[test] ++ fn smoke() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut index = repo.index().unwrap(); ++ ++ let root = repo.path().parent().unwrap(); ++ fs::create_dir(&root.join("foo")).unwrap(); ++ File::create(&root.join("foo/bar")).unwrap(); ++ index.add_path(Path::new("foo/bar")).unwrap(); ++ ++ let id = index.write_tree().unwrap(); ++ let tree = repo.find_tree(id).unwrap(); ++ let sig = repo.signature().unwrap(); ++ let id = repo.refname_to_id("HEAD").unwrap(); ++ let parent = repo.find_commit(id).unwrap(); ++ let commit = repo.commit(Some("HEAD"), &sig, &sig, "commit", ++ &tree, &[&parent]).unwrap(); ++ ++ let blame = repo.blame_file(Path::new("foo/bar"), None).unwrap(); ++ ++ assert_eq!(blame.len(), 1); ++ assert_eq!(blame.iter().count(), 1); ++ ++ let hunk = blame.get_index(0).unwrap(); ++ assert_eq!(hunk.final_commit_id(), commit); ++ assert_eq!(hunk.final_signature().name(), sig.name()); ++ assert_eq!(hunk.final_signature().email(), sig.email()); ++ assert_eq!(hunk.final_start_line(), 1); ++ assert_eq!(hunk.path(), Some(Path::new("foo/bar"))); ++ assert_eq!(hunk.lines_in_hunk(), 0); ++ assert!(!hunk.is_boundary()) ++ } ++ ++} ++ diff --cc vendor/git2-0.7.5/src/blob.rs index 000000000,000000000..5e255eb6d new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/blob.rs @@@ -1,0 -1,0 +1,186 @@@ ++use std::marker; ++use std::mem; ++use std::slice; ++use std::io; ++ ++use {raw, Oid, Object, Error}; ++use util::Binding; ++ ++/// A structure to represent a git [blob][1] ++/// ++/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects ++pub struct Blob<'repo> { ++ raw: *mut raw::git_blob, ++ _marker: 
marker::PhantomData<Object<'repo>>,
++}
++
++impl<'repo> Blob<'repo> {
++ /// Get the id (SHA1) of a repository blob
++ pub fn id(&self) -> Oid {
++ unsafe { Binding::from_raw(raw::git_blob_id(&*self.raw)) }
++ }
++
++ /// Determine if the blob content is most certainly binary or not.
++ pub fn is_binary(&self) -> bool {
++ unsafe { raw::git_blob_is_binary(&*self.raw) == 1 }
++ }
++
++ /// Get the content of this blob.
++ pub fn content(&self) -> &[u8] {
++ unsafe {
++ let data = raw::git_blob_rawcontent(&*self.raw) as *const u8;
++ let len = raw::git_blob_rawsize(&*self.raw) as usize;
++ slice::from_raw_parts(data, len)
++ }
++ }
++
++ /// Casts this Blob to be usable as an `Object`
++ pub fn as_object(&self) -> &Object<'repo> {
++ unsafe {
++ &*(self as *const _ as *const Object<'repo>)
++ }
++ }
++
++ /// Consumes the Blob to be returned as an `Object`
++ pub fn into_object(self) -> Object<'repo> {
++ assert_eq!(mem::size_of_val(&self), mem::size_of::<Object>());
++ unsafe {
++ mem::transmute(self)
++ }
++ }
++}
++
++impl<'repo> Binding for Blob<'repo> {
++ type Raw = *mut raw::git_blob;
++
++ unsafe fn from_raw(raw: *mut raw::git_blob) -> Blob<'repo> {
++ Blob {
++ raw: raw,
++ _marker: marker::PhantomData,
++ }
++ }
++ fn raw(&self) -> *mut raw::git_blob { self.raw }
++}
++
++impl<'repo> ::std::fmt::Debug for Blob<'repo> {
++ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
++ f.debug_struct("Blob").field("id", &self.id()).finish()
++ }
++}
++
++impl<'repo> Clone for Blob<'repo> {
++ fn clone(&self) -> Self {
++ self.as_object().clone().into_blob().ok().unwrap()
++ }
++}
++
++impl<'repo> Drop for Blob<'repo> {
++ fn drop(&mut self) {
++ unsafe { raw::git_blob_free(self.raw) }
++ }
++}
++
++/// A structure to represent a git writestream for blobs
++pub struct BlobWriter<'repo> {
++ raw: *mut raw::git_writestream,
++ need_cleanup: bool,
++ _marker: marker::PhantomData<Object<'repo>>,
++}
++
++impl<'repo> BlobWriter<'repo> {
++ /// Finalize the blob writing stream and write the blob to the object db
++ pub fn commit(mut self) -> Result<Oid, Error> {
++ // After a successful commit we no longer need cleanup on drop
++ self.need_cleanup = false;
++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++ unsafe {
++ try_call!(raw::git_blob_create_fromstream_commit(&mut raw, self.raw));
++ Ok(Binding::from_raw(&raw as *const _))
++ }
++ }
++}
++
++impl<'repo> Binding for BlobWriter<'repo> {
++ type Raw = *mut raw::git_writestream;
++
++ unsafe fn from_raw(raw: *mut raw::git_writestream) -> BlobWriter<'repo> {
++ BlobWriter {
++ raw: raw,
++ need_cleanup: true,
++ _marker: marker::PhantomData,
++ }
++ }
++ fn raw(&self) -> *mut raw::git_writestream { self.raw }
++}
++
++impl<'repo> Drop for BlobWriter<'repo> {
++ fn drop(&mut self) {
++ // We need cleanup in case the stream has not been committed
++ if self.need_cleanup {
++ unsafe { ((*self.raw).free)(self.raw) }
++ }
++ }
++}
++
++impl<'repo> io::Write for BlobWriter<'repo> {
++ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
++ unsafe {
++ let res = ((*self.raw).write)(self.raw, buf.as_ptr() as *const _, buf.len());
++ if res < 0 {
++ Err(io::Error::new(io::ErrorKind::Other, "Write error"))
++ } else {
++ Ok(buf.len())
++ }
++ }
++ }
++ fn flush(&mut self) -> io::Result<()> { Ok(()) }
++}
++
++#[cfg(test)]
++mod tests {
++ use std::io::prelude::*;
++ use std::fs::File;
++ use std::path::Path;
++ use tempdir::TempDir;
++ use Repository;
++
++ #[test]
++ fn buffer() {
++ let td = TempDir::new("test").unwrap();
++ let repo = Repository::init(td.path()).unwrap();
++ 
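// Bytes 5, 4 and 6 are non-text control bytes, so libgit2's heuristic
++ // classifies the blob as binary (asserted below).
++ 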
let id = repo.blob(&[5, 4, 6]).unwrap(); ++ let blob = repo.find_blob(id).unwrap(); ++ ++ assert_eq!(blob.id(), id); ++ assert_eq!(blob.content(), [5, 4, 6]); ++ assert!(blob.is_binary()); ++ ++ repo.find_object(id, None).unwrap().as_blob().unwrap(); ++ repo.find_object(id, None).unwrap().into_blob().ok().unwrap(); ++ } ++ ++ #[test] ++ fn path() { ++ let td = TempDir::new("test").unwrap(); ++ let path = td.path().join("foo"); ++ File::create(&path).unwrap().write_all(&[7, 8, 9]).unwrap(); ++ let repo = Repository::init(td.path()).unwrap(); ++ let id = repo.blob_path(&path).unwrap(); ++ let blob = repo.find_blob(id).unwrap(); ++ assert_eq!(blob.content(), [7, 8, 9]); ++ blob.into_object(); ++ } ++ ++ #[test] ++ fn stream() { ++ let td = TempDir::new("test").unwrap(); ++ let repo = Repository::init(td.path()).unwrap(); ++ let mut ws = repo.blob_writer(Some(Path::new("foo"))).unwrap(); ++ let wl = ws.write(&[10, 11, 12]).unwrap(); ++ assert_eq!(wl, 3); ++ let id = ws.commit().unwrap(); ++ let blob = repo.find_blob(id).unwrap(); ++ assert_eq!(blob.content(), [10, 11, 12]); ++ blob.into_object(); ++ } ++} diff --cc vendor/git2-0.7.5/src/branch.rs index 000000000,000000000..3f035ed0d new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/branch.rs @@@ -1,0 -1,0 +1,162 @@@ ++use std::ffi::CString; ++use std::marker; ++use std::ptr; ++use std::str; ++ ++use {raw, Error, Reference, BranchType, References}; ++use util::Binding; ++ ++/// A structure to represent a git [branch][1] ++/// ++/// A branch is currently just a wrapper to an underlying `Reference`. The ++/// reference can be accessed through the `get` and `unwrap` methods. ++/// ++/// [1]: http://git-scm.com/book/en/Git-Branching-What-a-Branch-Is ++pub struct Branch<'repo> { ++ inner: Reference<'repo>, ++} ++ ++/// An iterator over the branches inside of a repository. ++pub struct Branches<'repo> { ++ raw: *mut raw::git_branch_iterator, ++ _marker: marker::PhantomData>, ++} ++ ++impl<'repo> Branch<'repo> { ++ /// Creates Branch type from a Reference ++ pub fn wrap(reference: Reference) -> Branch { Branch { inner: reference } } ++ ++ /// Gain access to the reference that is this branch ++ pub fn get(&self) -> &Reference<'repo> { &self.inner } ++ ++ /// Take ownership of the underlying reference. ++ pub fn into_reference(self) -> Reference<'repo> { self.inner } ++ ++ /// Delete an existing branch reference. ++ pub fn delete(&mut self) -> Result<(), Error> { ++ unsafe { try_call!(raw::git_branch_delete(self.get().raw())); } ++ Ok(()) ++ } ++ ++ /// Determine if the current local branch is pointed at by HEAD. ++ pub fn is_head(&self) -> bool { ++ unsafe { raw::git_branch_is_head(&*self.get().raw()) == 1 } ++ } ++ ++ /// Move/rename an existing local branch reference. ++ pub fn rename(&mut self, new_branch_name: &str, force: bool) ++ -> Result, Error> { ++ let mut ret = ptr::null_mut(); ++ let new_branch_name = try!(CString::new(new_branch_name)); ++ unsafe { ++ try_call!(raw::git_branch_move(&mut ret, self.get().raw(), ++ new_branch_name, force)); ++ Ok(Branch::wrap(Binding::from_raw(ret))) ++ } ++ } ++ ++ /// Return the name of the given local or remote branch. ++ /// ++ /// May return `Ok(None)` if the name is not valid utf-8. ++ pub fn name(&self) -> Result, Error> { ++ self.name_bytes().map(|s| str::from_utf8(s).ok()) ++ } ++ ++ /// Return the name of the given local or remote branch. 
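++ ///
++ /// Unlike `name`, the result is not required to be valid utf-8.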
++ pub fn name_bytes(&self) -> Result<&[u8], Error> {
++ let mut ret = ptr::null();
++ unsafe {
++ try_call!(raw::git_branch_name(&mut ret, &*self.get().raw()));
++ Ok(::opt_bytes(self, ret).unwrap())
++ }
++ }
++
++ /// Return the reference supporting the remote tracking branch, given a
++ /// local branch reference.
++ pub fn upstream<'a>(&'a self) -> Result<Branch<'a>, Error> {
++ let mut ret = ptr::null_mut();
++ unsafe {
++ try_call!(raw::git_branch_upstream(&mut ret, &*self.get().raw()));
++ Ok(Branch::wrap(Binding::from_raw(ret)))
++ }
++ }
++
++ /// Set the upstream configuration for a given local branch.
++ ///
++ /// If `None` is specified, then the upstream branch is unset. The name
++ /// provided is the name of the branch to set as upstream.
++ pub fn set_upstream(&mut self,
++ upstream_name: Option<&str>) -> Result<(), Error> {
++ let upstream_name = try!(::opt_cstr(upstream_name));
++ unsafe {
++ try_call!(raw::git_branch_set_upstream(self.get().raw(),
++ upstream_name));
++ Ok(())
++ }
++ }
++}
++
++impl<'repo> Branches<'repo> {
++ /// Creates a new iterator from the raw pointer given.
++ ///
++ /// This function is unsafe as it is not guaranteed that `raw` is a valid
++ /// pointer.
++ pub unsafe fn from_raw(raw: *mut raw::git_branch_iterator)
++ -> Branches<'repo> {
++ Branches {
++ raw: raw,
++ _marker: marker::PhantomData,
++ }
++ }
++}
++
++impl<'repo> Iterator for Branches<'repo> {
++ type Item = Result<(Branch<'repo>, BranchType), Error>;
++ fn next(&mut self) -> Option<Result<(Branch<'repo>, BranchType), Error>> {
++ let mut ret = ptr::null_mut();
++ let mut typ = raw::GIT_BRANCH_LOCAL;
++ unsafe {
++ try_call_iter!(raw::git_branch_next(&mut ret, &mut typ, self.raw));
++ let typ = match typ {
++ raw::GIT_BRANCH_LOCAL => BranchType::Local,
++ raw::GIT_BRANCH_REMOTE => BranchType::Remote,
++ n => panic!("unexpected branch type: {}", n),
++ };
++ Some(Ok((Branch::wrap(Binding::from_raw(ret)), typ)))
++ }
++ }
++}
++
++impl<'repo> Drop for Branches<'repo> {
++ fn drop(&mut self) {
++ unsafe { raw::git_branch_iterator_free(self.raw) }
++ }
++}
++
++#[cfg(test)]
++mod tests {
++ use BranchType;
++
++ #[test]
++ fn smoke() {
++ let (_td, repo) = ::test::repo_init();
++ let head = repo.head().unwrap();
++ let target = head.target().unwrap();
++ let commit = repo.find_commit(target).unwrap();
++
++ let mut b1 = repo.branch("foo", &commit, false).unwrap();
++ assert!(!b1.is_head());
++ repo.branch("foo2", &commit, false).unwrap();
++
++ assert_eq!(repo.branches(None).unwrap().count(), 3);
++ repo.find_branch("foo", BranchType::Local).unwrap();
++ let mut b1 = b1.rename("bar", false).unwrap();
++ assert_eq!(b1.name().unwrap(), Some("bar"));
++ assert!(b1.upstream().is_err());
++ b1.set_upstream(Some("master")).unwrap();
++ b1.upstream().unwrap();
++ b1.set_upstream(None).unwrap();
++
++ b1.delete().unwrap();
++ }
++}
diff --cc vendor/git2-0.7.5/src/buf.rs
index 000000000,000000000..78e958e2e
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/buf.rs
@@@ -1,0 -1,0 +1,73 @@@
++use std::slice;
++use std::str;
++use std::ptr;
++use std::ops::{Deref, DerefMut};
++
++use raw;
++use util::Binding;
++
++/// A structure to wrap an intermediate buffer used by libgit2.
++///
++/// A buffer can be thought of as a `Vec<u8>`, but the `Vec` type is not used
++/// to avoid copying data back and forth.
++pub struct Buf {
++ raw: raw::git_buf,
++}
++
++impl Default for Buf {
++ fn default() -> Self {
++ Self::new()
++ }
++}
++
++impl Buf {
++ /// Creates a new empty buffer.
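++ ///
++ /// The buffer starts out with a null pointer and zero length; a libgit2
++ /// call taking a `git_buf` out-parameter is expected to fill it in.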
++ pub fn new() -> Buf {
++ ::init();
++ unsafe {
++ Binding::from_raw(&mut raw::git_buf {
++ ptr: ptr::null_mut(),
++ size: 0,
++ asize: 0,
++ } as *mut _)
++ }
++ }
++
++ /// Attempt to view this buffer as a string slice.
++ ///
++ /// Returns `None` if the buffer is not valid utf-8.
++ pub fn as_str(&self) -> Option<&str> { str::from_utf8(&**self).ok() }
++}
++
++impl Deref for Buf {
++ type Target = [u8];
++ fn deref(&self) -> &[u8] {
++ unsafe {
++ slice::from_raw_parts(self.raw.ptr as *const u8,
++ self.raw.size as usize)
++ }
++ }
++}
++
++impl DerefMut for Buf {
++ fn deref_mut(&mut self) -> &mut [u8] {
++ unsafe {
++ slice::from_raw_parts_mut(self.raw.ptr as *mut u8,
++ self.raw.size as usize)
++ }
++ }
++}
++
++impl Binding for Buf {
++ type Raw = *mut raw::git_buf;
++ unsafe fn from_raw(raw: *mut raw::git_buf) -> Buf {
++ Buf { raw: *raw }
++ }
++ fn raw(&self) -> *mut raw::git_buf { &self.raw as *const _ as *mut _ }
++}
++
++impl Drop for Buf {
++ fn drop(&mut self) {
++ unsafe { raw::git_buf_free(&mut self.raw) }
++ }
++}
diff --cc vendor/git2-0.7.5/src/build.rs
index 000000000,000000000..4e4d2959a
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/build.rs
@@@ -1,0 -1,0 +1,638 @@@
++//! Builder-pattern objects for configuring various git operations.
++
++use std::ffi::{CStr, CString};
++use std::mem;
++use std::path::Path;
++use std::ptr;
++use libc::{c_char, size_t, c_void, c_uint, c_int};
++
++use {raw, panic, Error, Repository, FetchOptions, IntoCString};
++use {CheckoutNotificationType, DiffFile, Remote};
++use util::{self, Binding};
++
++/// A builder struct which is used to build configuration for cloning a new git
++/// repository.
++pub struct RepoBuilder<'cb> {
++ bare: bool,
++ branch: Option<CString>,
++ local: bool,
++ hardlinks: bool,
++ checkout: Option<CheckoutBuilder<'cb>>,
++ fetch_opts: Option<FetchOptions<'cb>>,
++ clone_local: Option<CloneLocal>,
++ remote_create: Option<Box<RemoteCreate<'cb>>>,
++}
++
++/// Type of callback passed to `RepoBuilder::remote_create`.
++///
++/// The second and third arguments are the remote's name and the remote's url.
++pub type RemoteCreate<'cb> = for<'a> FnMut(&'a Repository, &str, &str)
++ -> Result<Remote<'a>, Error> + 'cb;
++
++/// A builder struct for configuring checkouts of a repository.
++pub struct CheckoutBuilder<'cb> {
++ their_label: Option<CString>,
++ our_label: Option<CString>,
++ ancestor_label: Option<CString>,
++ target_dir: Option<CString>,
++ paths: Vec<CString>,
++ path_ptrs: Vec<*const c_char>,
++ file_perm: Option<i32>,
++ dir_perm: Option<i32>,
++ disable_filters: bool,
++ checkout_opts: u32,
++ progress: Option<Box<Progress<'cb>>>,
++ notify: Option<Box<Notify<'cb>>>,
++ notify_flags: CheckoutNotificationType,
++}
++
++/// Checkout progress notification callback.
++///
++/// The first argument is the path for the notification, the next is the number
++/// of completed steps so far, and the final is the total number of steps.
++pub type Progress<'a> = FnMut(Option<&Path>, usize, usize) + 'a;
++
++/// Checkout notifications callback.
++///
++/// The first argument is the notification type, the next is the path for
++/// the notification, followed by the baseline diff, target diff, and workdir diff.
++///
++/// The callback must return a bool specifying whether the checkout should
++/// continue.
++pub type Notify<'a> = FnMut(CheckoutNotificationType, Option<&Path>, DiffFile,
++ DiffFile, DiffFile) -> bool + 'a;
++
++
++impl<'cb> Default for RepoBuilder<'cb> {
++ fn default() -> Self {
++ Self::new()
++ }
++}
++
++/// Options that can be passed to `RepoBuilder::clone_local`.
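++///
++/// For example, a sketch of a local clone that copies the object database
++/// instead of hardlinking it (paths are illustrative):
++/// `RepoBuilder::new().clone_local(CloneLocal::NoLinks).clone("/src/repo", Path::new("/dst/repo"))`.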
++#[derive(Clone, Copy)] ++pub enum CloneLocal { ++ /// Auto-detect (default) ++ /// ++ /// Here libgit2 will bypass the git-aware transport for local paths, but ++ /// use a normal fetch for `file://` urls. ++ Auto = raw::GIT_CLONE_LOCAL_AUTO as isize, ++ ++ /// Bypass the git-aware transport even for `file://` urls. ++ Local = raw::GIT_CLONE_LOCAL as isize, ++ ++ /// Never bypass the git-aware transport ++ None = raw::GIT_CLONE_NO_LOCAL as isize, ++ ++ /// Bypass the git-aware transport, but don't try to use hardlinks. ++ NoLinks = raw::GIT_CLONE_LOCAL_NO_LINKS as isize, ++ ++ #[doc(hidden)] ++ __Nonexhaustive = 0xff, ++} ++ ++impl<'cb> RepoBuilder<'cb> { ++ /// Creates a new repository builder with all of the default configuration. ++ /// ++ /// When ready, the `clone()` method can be used to clone a new repository ++ /// using this configuration. ++ pub fn new() -> RepoBuilder<'cb> { ++ ::init(); ++ RepoBuilder { ++ bare: false, ++ branch: None, ++ local: true, ++ clone_local: None, ++ hardlinks: true, ++ checkout: None, ++ fetch_opts: None, ++ remote_create: None, ++ } ++ } ++ ++ /// Indicate whether the repository will be cloned as a bare repository or ++ /// not. ++ pub fn bare(&mut self, bare: bool) -> &mut RepoBuilder<'cb> { ++ self.bare = bare; ++ self ++ } ++ ++ /// Specify the name of the branch to check out after the clone. ++ /// ++ /// If not specified, the remote's default branch will be used. ++ pub fn branch(&mut self, branch: &str) -> &mut RepoBuilder<'cb> { ++ self.branch = Some(CString::new(branch).unwrap()); ++ self ++ } ++ ++ /// Configures options for bypassing the git-aware transport on clone. ++ /// ++ /// Bypassing it means that instead of a fetch libgit2 will copy the object ++ /// database directory instead of figuring out what it needs, which is ++ /// faster. If possible, it will hardlink the files to save space. ++ pub fn clone_local(&mut self, clone_local: CloneLocal) -> &mut RepoBuilder<'cb> { ++ self.clone_local = Some(clone_local); ++ self ++ } ++ ++ /// Set the flag for bypassing the git aware transport mechanism for local ++ /// paths. ++ /// ++ /// If `true`, the git-aware transport will be bypassed for local paths. If ++ /// `false`, the git-aware transport will not be bypassed. ++ #[deprecated(note = "use `clone_local` instead")] ++ #[doc(hidden)] ++ pub fn local(&mut self, local: bool) -> &mut RepoBuilder<'cb> { ++ self.local = local; ++ self ++ } ++ ++ /// Set the flag for whether hardlinks are used when using a local git-aware ++ /// transport mechanism. ++ #[deprecated(note = "use `clone_local` instead")] ++ #[doc(hidden)] ++ pub fn hardlinks(&mut self, links: bool) -> &mut RepoBuilder<'cb> { ++ self.hardlinks = links; ++ self ++ } ++ ++ /// Configure the checkout which will be performed by consuming a checkout ++ /// builder. ++ pub fn with_checkout(&mut self, checkout: CheckoutBuilder<'cb>) ++ -> &mut RepoBuilder<'cb> { ++ self.checkout = Some(checkout); ++ self ++ } ++ ++ /// Options which control the fetch, including callbacks. ++ /// ++ /// The callbacks are used for reporting fetch progress, and for acquiring ++ /// credentials in the event they are needed. ++ pub fn fetch_options(&mut self, fetch_opts: FetchOptions<'cb>) ++ -> &mut RepoBuilder<'cb> { ++ self.fetch_opts = Some(fetch_opts); ++ self ++ } ++ ++ /// Configures a callback used to create the git remote, prior to its being ++ /// used to perform the clone operation. 
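Editor's note: a sketch of `clone_local` in context, assuming `CloneLocal` is reachable at `git2::build::CloneLocal` in this version; the mirror path and target directory are hypothetical.

    extern crate git2;

    use std::path::Path;
    use git2::Repository;
    use git2::build::{CloneLocal, RepoBuilder};

    fn clone_from_mirror() -> Result<Repository, git2::Error> {
        RepoBuilder::new()
            .bare(true)
            .branch("master")
            // Copy the object database directly, but skip hardlinks.
            .clone_local(CloneLocal::NoLinks)
            .clone("file:///srv/mirror/repo.git", Path::new("/tmp/repo.git"))
    }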
++ pub fn remote_create(&mut self, f: F) -> &mut RepoBuilder<'cb> ++ where F: for<'a> FnMut(&'a Repository, &str, &str) ++ -> Result, Error> + 'cb, ++ { ++ self.remote_create = Some(Box::new(f)); ++ self ++ } ++ ++ /// Clone a remote repository. ++ /// ++ /// This will use the options configured so far to clone the specified url ++ /// into the specified local path. ++ pub fn clone(&mut self, url: &str, into: &Path) -> Result { ++ let mut opts: raw::git_clone_options = unsafe { mem::zeroed() }; ++ unsafe { ++ try_call!(raw::git_clone_init_options(&mut opts, ++ raw::GIT_CLONE_OPTIONS_VERSION)); ++ } ++ opts.bare = self.bare as c_int; ++ opts.checkout_branch = self.branch.as_ref().map(|s| { ++ s.as_ptr() ++ }).unwrap_or(ptr::null()); ++ ++ if let Some(ref local) = self.clone_local { ++ opts.local = *local as raw::git_clone_local_t; ++ } else { ++ opts.local = match (self.local, self.hardlinks) { ++ (true, false) => raw::GIT_CLONE_LOCAL_NO_LINKS, ++ (false, _) => raw::GIT_CLONE_NO_LOCAL, ++ (true, _) => raw::GIT_CLONE_LOCAL_AUTO, ++ }; ++ } ++ ++ if let Some(ref mut cbs) = self.fetch_opts { ++ opts.fetch_opts = cbs.raw(); ++ } ++ ++ if let Some(ref mut c) = self.checkout { ++ unsafe { ++ c.configure(&mut opts.checkout_opts); ++ } ++ } ++ ++ if let Some(ref mut callback) = self.remote_create { ++ opts.remote_cb = Some(remote_create_cb); ++ opts.remote_cb_payload = callback as *mut _ as *mut _; ++ } ++ ++ let url = try!(CString::new(url)); ++ let into = try!(into.into_c_string()); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_clone(&mut raw, url, into, &opts)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++} ++ ++extern fn remote_create_cb(out: *mut *mut raw::git_remote, ++ repo: *mut raw::git_repository, ++ name: *const c_char, ++ url: *const c_char, ++ payload: *mut c_void) -> c_int { ++ unsafe { ++ let repo = Repository::from_raw(repo); ++ let code = panic::wrap(|| { ++ let name = CStr::from_ptr(name).to_str().unwrap(); ++ let url = CStr::from_ptr(url).to_str().unwrap(); ++ let f = payload as *mut Box; ++ match (*f)(&repo, name, url) { ++ Ok(remote) => { ++ *out = ::remote::remote_into_raw(remote); ++ 0 ++ } ++ Err(e) => e.raw_code(), ++ } ++ }); ++ mem::forget(repo); ++ code.unwrap_or(-1) ++ } ++} ++ ++impl<'cb> Default for CheckoutBuilder<'cb> { ++ fn default() -> Self { ++ Self::new() ++ } ++} ++ ++impl<'cb> CheckoutBuilder<'cb> { ++ /// Creates a new builder for checkouts with all of its default ++ /// configuration. ++ pub fn new() -> CheckoutBuilder<'cb> { ++ ::init(); ++ CheckoutBuilder { ++ disable_filters: false, ++ dir_perm: None, ++ file_perm: None, ++ path_ptrs: Vec::new(), ++ paths: Vec::new(), ++ target_dir: None, ++ ancestor_label: None, ++ our_label: None, ++ their_label: None, ++ checkout_opts: raw::GIT_CHECKOUT_SAFE as u32, ++ progress: None, ++ notify: None, ++ notify_flags: CheckoutNotificationType::empty(), ++ } ++ } ++ ++ /// Indicate that this checkout should perform a dry run by checking for ++ /// conflicts but not make any actual changes. ++ pub fn dry_run(&mut self) -> &mut CheckoutBuilder<'cb> { ++ self.checkout_opts &= !((1 << 4) - 1); ++ self.checkout_opts |= raw::GIT_CHECKOUT_NONE as u32; ++ self ++ } ++ ++ /// Take any action necessary to get the working directory to match the ++ /// target including potentially discarding modified files. 
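Editor's note on the `remote_create` callback above: the closure must return the `Remote` to use for the clone; this sketch simply defers to `Repository::remote`, an API this crate provides. The URL and directory are hypothetical.

    extern crate git2;

    use std::path::Path;
    use git2::build::RepoBuilder;

    fn clone_with_custom_remote() -> Result<(), git2::Error> {
        let mut builder = RepoBuilder::new();
        // The closure receives the new repository plus the remote's name and url.
        builder.remote_create(|repo, name, url| repo.remote(name, url));
        builder.clone("https://example.com/repo.git", Path::new("repo"))?;
        Ok(())
    }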
++    pub fn force(&mut self) -> &mut CheckoutBuilder<'cb> {
++        self.checkout_opts &= !((1 << 4) - 1);
++        self.checkout_opts |= raw::GIT_CHECKOUT_FORCE as u32;
++        self
++    }
++
++    /// Indicate that the checkout should be performed safely, allowing new
++    /// files to be created but not overwriting existing files or changes.
++    ///
++    /// This is the default.
++    pub fn safe(&mut self) -> &mut CheckoutBuilder<'cb> {
++        self.checkout_opts &= !((1 << 4) - 1);
++        self.checkout_opts |= raw::GIT_CHECKOUT_SAFE as u32;
++        self
++    }
++
++    fn flag(&mut self, bit: raw::git_checkout_strategy_t,
++            on: bool) -> &mut CheckoutBuilder<'cb> {
++        if on {
++            self.checkout_opts |= bit as u32;
++        } else {
++            self.checkout_opts &= !(bit as u32);
++        }
++        self
++    }
++
++    /// In safe mode, create files that don't exist.
++    ///
++    /// Defaults to false.
++    pub fn recreate_missing(&mut self, allow: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_RECREATE_MISSING, allow)
++    }
++
++    /// In safe mode, apply safe file updates even when there are conflicts
++    /// instead of canceling the checkout.
++    ///
++    /// Defaults to false.
++    pub fn allow_conflicts(&mut self, allow: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_ALLOW_CONFLICTS, allow)
++    }
++
++    /// Remove untracked files from the working dir.
++    ///
++    /// Defaults to false.
++    pub fn remove_untracked(&mut self, remove: bool)
++                            -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_REMOVE_UNTRACKED, remove)
++    }
++
++    /// Remove ignored files from the working dir.
++    ///
++    /// Defaults to false.
++    pub fn remove_ignored(&mut self, remove: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_REMOVE_IGNORED, remove)
++    }
++
++    /// Only update the contents of files that already exist.
++    ///
++    /// If set, files will not be created or deleted.
++    ///
++    /// Defaults to false.
++    pub fn update_only(&mut self, update: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_UPDATE_ONLY, update)
++    }
++
++    /// Prevents checkout from writing the updated files' information to the
++    /// index.
++    ///
++    /// Defaults to true.
++    pub fn update_index(&mut self, update: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_DONT_UPDATE_INDEX, !update)
++    }
++
++    /// Indicate whether the index and git attributes should be refreshed from
++    /// disk before any operations.
++    ///
++    /// Defaults to true.
++    pub fn refresh(&mut self, refresh: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_NO_REFRESH, !refresh)
++    }
++
++    /// Skip files with unmerged index entries.
++    ///
++    /// Defaults to false.
++    pub fn skip_unmerged(&mut self, skip: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_SKIP_UNMERGED, skip)
++    }
++
++    /// Indicate whether the checkout should proceed on conflicts by using the
++    /// stage 2 version of the file ("ours").
++    ///
++    /// Defaults to false.
++    pub fn use_ours(&mut self, ours: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_USE_OURS, ours)
++    }
++
++    /// Indicate whether the checkout should proceed on conflicts by using the
++    /// stage 3 version of the file ("theirs").
++    ///
++    /// Defaults to false.
++    pub fn use_theirs(&mut self, theirs: bool) -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_USE_THEIRS, theirs)
++    }
++
++    /// Indicate whether ignored files should be overwritten during the checkout.
++    ///
++    /// Defaults to true.
++    pub fn overwrite_ignored(&mut self, overwrite: bool)
++                             -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_DONT_OVERWRITE_IGNORED, !overwrite)
++    }
++
++    /// Indicate whether a normal merge file should be written for conflicts.
++    ///
++    /// Defaults to false.
++    pub fn conflict_style_merge(&mut self, on: bool)
++                                -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_CONFLICT_STYLE_MERGE, on)
++    }
++
++    /// Specify for which notification types to invoke the notification
++    /// callback.
++    ///
++    /// Defaults to none.
++    pub fn notify_on(&mut self, notification_types: CheckoutNotificationType)
++                     -> &mut CheckoutBuilder<'cb> {
++        self.notify_flags = notification_types;
++        self
++    }
++
++    /// Indicates whether to include common ancestor data in diff3 format files
++    /// for conflicts.
++    ///
++    /// Defaults to false.
++    pub fn conflict_style_diff3(&mut self, on: bool)
++                                -> &mut CheckoutBuilder<'cb> {
++        self.flag(raw::GIT_CHECKOUT_CONFLICT_STYLE_DIFF3, on)
++    }
++
++    /// Indicate whether to apply filters like CRLF conversion.
++    pub fn disable_filters(&mut self, disable: bool)
++                           -> &mut CheckoutBuilder<'cb> {
++        self.disable_filters = disable;
++        self
++    }
++
++    /// Set the mode with which new directories are created.
++    ///
++    /// The default is 0755.
++    pub fn dir_perm(&mut self, perm: i32) -> &mut CheckoutBuilder<'cb> {
++        self.dir_perm = Some(perm);
++        self
++    }
++
++    /// Set the mode with which new files are created.
++    ///
++    /// The default is 0644 or 0755 as dictated by the blob.
++    pub fn file_perm(&mut self, perm: i32) -> &mut CheckoutBuilder<'cb> {
++        self.file_perm = Some(perm);
++        self
++    }
++
++    /// Add a path to be checked out.
++    ///
++    /// If no paths are specified, then all files are checked out. Otherwise
++    /// only these specified paths are checked out.
++    pub fn path<T: IntoCString>(&mut self, path: T)
++                                -> &mut CheckoutBuilder<'cb> {
++        let path = path.into_c_string().unwrap();
++        self.path_ptrs.push(path.as_ptr());
++        self.paths.push(path);
++        self
++    }
++
++    /// Set the directory to check out to.
++    pub fn target_dir(&mut self, dst: &Path) -> &mut CheckoutBuilder<'cb> {
++        self.target_dir = Some(dst.into_c_string().unwrap());
++        self
++    }
++
++    /// The name of the common ancestor side of conflicts.
++    pub fn ancestor_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> {
++        self.ancestor_label = Some(CString::new(label).unwrap());
++        self
++    }
++
++    /// The name of the "our" side of conflicts.
++    pub fn our_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> {
++        self.our_label = Some(CString::new(label).unwrap());
++        self
++    }
++
++    /// The name of the "their" side of conflicts.
++    pub fn their_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> {
++        self.their_label = Some(CString::new(label).unwrap());
++        self
++    }
++
++    /// Set a callback to receive notifications of checkout progress.
++    pub fn progress<F>(&mut self, cb: F) -> &mut CheckoutBuilder<'cb>
++                       where F: FnMut(Option<&Path>, usize, usize) + 'cb {
++        self.progress = Some(Box::new(cb) as Box<Progress<'cb>>);
++        self
++    }
++
++    /// Set a callback to receive checkout notifications.
++    ///
++    /// Callbacks are invoked prior to modifying any files on disk.
++    /// Returning `false` from the callback will cancel the checkout.
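Editor's note: a combined sketch of the flag, path, and progress APIs above, assuming `Repository::checkout_head` accepts the builder (as elsewhere in this crate).

    extern crate git2;

    use git2::Repository;
    use git2::build::CheckoutBuilder;

    fn force_checkout_src() -> Result<(), git2::Error> {
        let repo = Repository::open(".")?;
        let mut opts = CheckoutBuilder::new();
        opts.force()                 // overwrite local modifications
            .remove_untracked(true)  // delete untracked files...
            .path("src")             // ...but only under src/
            .progress(|path, completed, total| {
                println!("{}/{} {:?}", completed, total, path);
            });
        repo.checkout_head(Some(&mut opts))
    }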
++ pub fn notify(&mut self, cb: F) -> &mut CheckoutBuilder<'cb> ++ where F: FnMut(CheckoutNotificationType, Option<&Path>, DiffFile, ++ DiffFile, DiffFile) -> bool + 'cb ++ { ++ self.notify = Some(Box::new(cb) as Box>); ++ self ++ } ++ ++ /// Configure a raw checkout options based on this configuration. ++ /// ++ /// This method is unsafe as there is no guarantee that this structure will ++ /// outlive the provided checkout options. ++ pub unsafe fn configure(&mut self, opts: &mut raw::git_checkout_options) { ++ opts.version = raw::GIT_CHECKOUT_OPTIONS_VERSION; ++ opts.disable_filters = self.disable_filters as c_int; ++ opts.dir_mode = self.dir_perm.unwrap_or(0) as c_uint; ++ opts.file_mode = self.file_perm.unwrap_or(0) as c_uint; ++ ++ if !self.path_ptrs.is_empty() { ++ opts.paths.strings = self.path_ptrs.as_ptr() as *mut _; ++ opts.paths.count = self.path_ptrs.len() as size_t; ++ } ++ ++ if let Some(ref c) = self.target_dir { ++ opts.target_directory = c.as_ptr(); ++ } ++ if let Some(ref c) = self.ancestor_label { ++ opts.ancestor_label = c.as_ptr(); ++ } ++ if let Some(ref c) = self.our_label { ++ opts.our_label = c.as_ptr(); ++ } ++ if let Some(ref c) = self.their_label { ++ opts.their_label = c.as_ptr(); ++ } ++ if self.progress.is_some() { ++ let f: raw::git_checkout_progress_cb = progress_cb; ++ opts.progress_cb = Some(f); ++ opts.progress_payload = self as *mut _ as *mut _; ++ } ++ if self.notify.is_some() { ++ let f: raw::git_checkout_notify_cb = notify_cb; ++ opts.notify_cb = Some(f); ++ opts.notify_payload = self as *mut _ as *mut _; ++ opts.notify_flags = self.notify_flags.bits() as c_uint; ++ } ++ opts.checkout_strategy = self.checkout_opts as c_uint; ++ } ++} ++ ++extern fn progress_cb(path: *const c_char, ++ completed: size_t, ++ total: size_t, ++ data: *mut c_void) { ++ panic::wrap(|| unsafe { ++ let payload = &mut *(data as *mut CheckoutBuilder); ++ let callback = match payload.progress { ++ Some(ref mut c) => c, ++ None => return, ++ }; ++ let path = if path.is_null() { ++ None ++ } else { ++ Some(util::bytes2path(CStr::from_ptr(path).to_bytes())) ++ }; ++ callback(path, completed as usize, total as usize) ++ }); ++} ++ ++extern fn notify_cb(why: raw::git_checkout_notify_t, ++ path: *const c_char, ++ baseline: *const raw::git_diff_file, ++ target: *const raw::git_diff_file, ++ workdir: *const raw::git_diff_file, ++ data: *mut c_void) -> c_int { ++ // pack callback etc ++ panic::wrap(|| unsafe { ++ let payload = &mut *(data as *mut CheckoutBuilder); ++ let callback = match payload.notify { ++ Some(ref mut c) => c, ++ None => return 0, ++ }; ++ let path = if path.is_null() { ++ None ++ } else { ++ Some(util::bytes2path(CStr::from_ptr(path).to_bytes())) ++ }; ++ ++ let why = CheckoutNotificationType::from_bits_truncate(why as u32); ++ let keep_going = callback(why, ++ path, ++ DiffFile::from_raw(baseline), ++ DiffFile::from_raw(target), ++ DiffFile::from_raw(workdir)); ++ if keep_going {0} else {1} ++ }).unwrap_or(2) ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::fs; ++ use std::path::Path; ++ use tempdir::TempDir; ++ use super::RepoBuilder; ++ use Repository; ++ ++ #[test] ++ fn smoke() { ++ let r = RepoBuilder::new().clone("/path/to/nowhere", Path::new("foo")); ++ assert!(r.is_err()); ++ } ++ ++ #[test] ++ fn smoke2() { ++ let td = TempDir::new("test").unwrap(); ++ Repository::init_bare(&td.path().join("bare")).unwrap(); ++ let url = if cfg!(unix) { ++ format!("file://{}/bare", td.path().display()) ++ } else { ++ format!("file:///{}/bare", td.path().display().to_string() 
++ .replace("\\", "/")) ++ }; ++ ++ let dst = td.path().join("foo"); ++ RepoBuilder::new().clone(&url, &dst).unwrap(); ++ fs::remove_dir_all(&dst).unwrap(); ++ assert!(RepoBuilder::new().branch("foo") ++ .clone(&url, &dst).is_err()); ++ } ++ ++} diff --cc vendor/git2-0.7.5/src/call.rs index 000000000,000000000..3367275bf new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/call.rs @@@ -1,0 -1,0 +1,217 @@@ ++#![macro_use] ++use libc; ++ ++use Error; ++ ++macro_rules! call { ++ (raw::$p:ident ($($e:expr),*)) => ( ++ raw::$p($(::call::convert(&$e)),*) ++ ) ++} ++ ++macro_rules! try_call { ++ (raw::$p:ident ($($e:expr),*)) => ({ ++ match ::call::try(raw::$p($(::call::convert(&$e)),*)) { ++ Ok(o) => o, ++ Err(e) => { ::panic::check(); return Err(e) } ++ } ++ }) ++} ++ ++macro_rules! try_call_iter { ++ ($($f:tt)*) => { ++ match call!($($f)*) { ++ 0 => {} ++ raw::GIT_ITEROVER => return None, ++ e => return Some(Err(::call::last_error(e))) ++ } ++ } ++} ++ ++#[doc(hidden)] ++pub trait Convert { ++ fn convert(&self) -> T; ++} ++ ++pub fn convert>(u: &U) -> T { u.convert() } ++ ++pub fn try(ret: libc::c_int) -> Result { ++ match ret { ++ n if n < 0 => Err(last_error(n)), ++ n => Ok(n), ++ } ++} ++ ++pub fn last_error(code: libc::c_int) -> Error { ++ // nowadays this unwrap is safe as `Error::last_error` always returns ++ // `Some`. ++ Error::last_error(code).unwrap() ++} ++ ++mod impls { ++ use std::ffi::CString; ++ use std::ptr; ++ ++ use libc; ++ ++ use {raw, ConfigLevel, ResetType, ObjectType, BranchType, Direction}; ++ use {DiffFormat, FileFavor, SubmoduleIgnore, AutotagOption, FetchPrune}; ++ use call::Convert; ++ ++ impl Convert for T { ++ fn convert(&self) -> T { *self } ++ } ++ ++ impl Convert for bool { ++ fn convert(&self) -> libc::c_int { *self as libc::c_int } ++ } ++ impl<'a, T> Convert<*const T> for &'a T { ++ fn convert(&self) -> *const T { *self as *const T } ++ } ++ impl<'a, T> Convert<*mut T> for &'a mut T { ++ fn convert(&self) -> *mut T { &**self as *const T as *mut T } ++ } ++ impl Convert<*const T> for *mut T { ++ fn convert(&self) -> *const T { *self as *const T } ++ } ++ ++ impl Convert<*const libc::c_char> for CString { ++ fn convert(&self) -> *const libc::c_char { self.as_ptr() } ++ } ++ ++ impl> Convert<*const T> for Option { ++ fn convert(&self) -> *const T { ++ self.as_ref().map(|s| s.convert()).unwrap_or(ptr::null()) ++ } ++ } ++ ++ impl> Convert<*mut T> for Option { ++ fn convert(&self) -> *mut T { ++ self.as_ref().map(|s| s.convert()).unwrap_or(ptr::null_mut()) ++ } ++ } ++ ++ impl Convert for ResetType { ++ fn convert(&self) -> raw::git_reset_t { ++ match *self { ++ ResetType::Soft => raw::GIT_RESET_SOFT, ++ ResetType::Hard => raw::GIT_RESET_HARD, ++ ResetType::Mixed => raw::GIT_RESET_MIXED, ++ } ++ } ++ } ++ ++ impl Convert for Direction { ++ fn convert(&self) -> raw::git_direction { ++ match *self { ++ Direction::Push => raw::GIT_DIRECTION_PUSH, ++ Direction::Fetch => raw::GIT_DIRECTION_FETCH, ++ } ++ } ++ } ++ ++ impl Convert for ObjectType { ++ fn convert(&self) -> raw::git_otype { ++ match *self { ++ ObjectType::Any => raw::GIT_OBJ_ANY, ++ ObjectType::Commit => raw::GIT_OBJ_COMMIT, ++ ObjectType::Tree => raw::GIT_OBJ_TREE, ++ ObjectType::Blob => raw::GIT_OBJ_BLOB, ++ ObjectType::Tag => raw::GIT_OBJ_TAG, ++ } ++ } ++ } ++ ++ impl Convert for Option { ++ fn convert(&self) -> raw::git_otype { ++ self.unwrap_or(ObjectType::Any).convert() ++ } ++ } ++ ++ impl Convert for BranchType { ++ fn convert(&self) -> raw::git_branch_t { ++ match *self { ++ 
BranchType::Remote => raw::GIT_BRANCH_REMOTE, ++ BranchType::Local => raw::GIT_BRANCH_LOCAL, ++ } ++ } ++ } ++ ++ impl Convert for Option { ++ fn convert(&self) -> raw::git_branch_t { ++ self.map(|s| s.convert()).unwrap_or(raw::GIT_BRANCH_ALL) ++ } ++ } ++ ++ impl Convert for ConfigLevel { ++ fn convert(&self) -> raw::git_config_level_t { ++ match *self { ++ ConfigLevel::ProgramData => raw::GIT_CONFIG_LEVEL_PROGRAMDATA, ++ ConfigLevel::System => raw::GIT_CONFIG_LEVEL_SYSTEM, ++ ConfigLevel::XDG => raw::GIT_CONFIG_LEVEL_XDG, ++ ConfigLevel::Global => raw::GIT_CONFIG_LEVEL_GLOBAL, ++ ConfigLevel::Local => raw::GIT_CONFIG_LEVEL_LOCAL, ++ ConfigLevel::App => raw::GIT_CONFIG_LEVEL_APP, ++ ConfigLevel::Highest => raw::GIT_CONFIG_HIGHEST_LEVEL, ++ } ++ } ++ } ++ ++ impl Convert for DiffFormat { ++ fn convert(&self) -> raw::git_diff_format_t { ++ match *self { ++ DiffFormat::Patch => raw::GIT_DIFF_FORMAT_PATCH, ++ DiffFormat::PatchHeader => raw::GIT_DIFF_FORMAT_PATCH_HEADER, ++ DiffFormat::Raw => raw::GIT_DIFF_FORMAT_RAW, ++ DiffFormat::NameOnly => raw::GIT_DIFF_FORMAT_NAME_ONLY, ++ DiffFormat::NameStatus => raw::GIT_DIFF_FORMAT_NAME_STATUS, ++ } ++ } ++ } ++ ++ impl Convert for FileFavor { ++ fn convert(&self) -> raw::git_merge_file_favor_t { ++ match *self { ++ FileFavor::Normal => raw::GIT_MERGE_FILE_FAVOR_NORMAL, ++ FileFavor::Ours => raw::GIT_MERGE_FILE_FAVOR_OURS, ++ FileFavor::Theirs => raw::GIT_MERGE_FILE_FAVOR_THEIRS, ++ FileFavor::Union => raw::GIT_MERGE_FILE_FAVOR_UNION, ++ } ++ } ++ } ++ ++ impl Convert for SubmoduleIgnore { ++ fn convert(&self) -> raw::git_submodule_ignore_t { ++ match *self { ++ SubmoduleIgnore::Unspecified => ++ raw::GIT_SUBMODULE_IGNORE_UNSPECIFIED, ++ SubmoduleIgnore::None => raw::GIT_SUBMODULE_IGNORE_NONE, ++ SubmoduleIgnore::Untracked => raw::GIT_SUBMODULE_IGNORE_UNTRACKED, ++ SubmoduleIgnore::Dirty => raw::GIT_SUBMODULE_IGNORE_DIRTY, ++ SubmoduleIgnore::All => raw::GIT_SUBMODULE_IGNORE_ALL, ++ } ++ } ++ } ++ ++ impl Convert for AutotagOption { ++ fn convert(&self) -> raw::git_remote_autotag_option_t { ++ match *self { ++ AutotagOption::Unspecified => ++ raw::GIT_REMOTE_DOWNLOAD_TAGS_UNSPECIFIED, ++ AutotagOption::None => raw::GIT_REMOTE_DOWNLOAD_TAGS_NONE, ++ AutotagOption::Auto => raw::GIT_REMOTE_DOWNLOAD_TAGS_AUTO, ++ AutotagOption::All => raw::GIT_REMOTE_DOWNLOAD_TAGS_ALL, ++ } ++ } ++ } ++ ++ impl Convert for FetchPrune { ++ fn convert(&self) -> raw::git_fetch_prune_t { ++ match *self { ++ FetchPrune::Unspecified => raw::GIT_FETCH_PRUNE_UNSPECIFIED, ++ FetchPrune::On => raw::GIT_FETCH_PRUNE, ++ FetchPrune::Off => raw::GIT_FETCH_NO_PRUNE, ++ } ++ } ++ } ++} diff --cc vendor/git2-0.7.5/src/cert.rs index 000000000,000000000..70ab9498f new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/cert.rs @@@ -1,0 -1,0 +1,97 @@@ ++//! Certificate types which are passed to `CertificateCheck` in ++//! `RemoteCallbacks`. ++ ++use std::marker; ++use std::mem; ++use std::slice; ++ ++use raw; ++use util::Binding; ++ ++/// A certificate for a remote connection, viewable as one of `CertHostkey` or ++/// `CertX509` currently. 
++pub struct Cert<'a> { ++ raw: *mut raw::git_cert, ++ _marker: marker::PhantomData<&'a raw::git_cert>, ++} ++ ++/// Hostkey information taken from libssh2 ++pub struct CertHostkey<'a> { ++ raw: *mut raw::git_cert_hostkey, ++ _marker: marker::PhantomData<&'a raw::git_cert>, ++} ++ ++/// X.509 certificate information ++pub struct CertX509<'a> { ++ raw: *mut raw::git_cert_x509, ++ _marker: marker::PhantomData<&'a raw::git_cert>, ++} ++ ++impl<'a> Cert<'a> { ++ /// Attempt to view this certificate as an SSH hostkey. ++ /// ++ /// Returns `None` if this is not actually an SSH hostkey. ++ pub fn as_hostkey(&self) -> Option<&CertHostkey<'a>> { ++ self.cast(raw::GIT_CERT_HOSTKEY_LIBSSH2) ++ } ++ ++ /// Attempt to view this certificate as an X.509 certificate. ++ /// ++ /// Returns `None` if this is not actually an X.509 certificate. ++ pub fn as_x509(&self) -> Option<&CertX509<'a>> { ++ self.cast(raw::GIT_CERT_X509) ++ } ++ ++ fn cast(&self, kind: raw::git_cert_t) -> Option<&T> { ++ assert_eq!(mem::size_of::>(), mem::size_of::()); ++ unsafe { ++ if kind == (*self.raw).cert_type { ++ Some(&*(self as *const Cert<'a> as *const T)) ++ } else { ++ None ++ } ++ } ++ } ++} ++ ++impl<'a> CertHostkey<'a> { ++ /// Returns the md5 hash of the hostkey, if available. ++ pub fn hash_md5(&self) -> Option<&[u8; 16]> { ++ unsafe { ++ if (*self.raw).kind as u32 & raw::GIT_CERT_SSH_MD5 as u32 == 0 { ++ None ++ } else { ++ Some(&(*self.raw).hash_md5) ++ } ++ } ++ } ++ ++ /// Returns the SHA-1 hash of the hostkey, if available. ++ pub fn hash_sha1(&self) -> Option<&[u8; 20]> { ++ unsafe { ++ if (*self.raw).kind as u32 & raw::GIT_CERT_SSH_SHA1 as u32 == 0 { ++ None ++ } else { ++ Some(&(*self.raw).hash_sha1) ++ } ++ } ++ } ++} ++ ++impl<'a> CertX509<'a> { ++ /// Return the X.509 certificate data as a byte slice ++ pub fn data(&self) -> &[u8] { ++ unsafe { ++ slice::from_raw_parts((*self.raw).data as *const u8, ++ (*self.raw).len as usize) ++ } ++ } ++} ++ ++impl<'a> Binding for Cert<'a> { ++ type Raw = *mut raw::git_cert; ++ unsafe fn from_raw(raw: *mut raw::git_cert) -> Cert<'a> { ++ Cert { raw: raw, _marker: marker::PhantomData } ++ } ++ fn raw(&self) -> *mut raw::git_cert { self.raw } ++} diff --cc vendor/git2-0.7.5/src/commit.rs index 000000000,000000000..f6086309c new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/commit.rs @@@ -1,0 -1,0 +1,359 @@@ ++use std::marker; ++use std::mem; ++use std::ops::Range; ++use std::ptr; ++use std::str; ++use libc; ++ ++use {raw, signature, Oid, Error, Signature, Tree, Time, Object}; ++use util::Binding; ++ ++/// A structure to represent a git [commit][1] ++/// ++/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects ++pub struct Commit<'repo> { ++ raw: *mut raw::git_commit, ++ _marker: marker::PhantomData>, ++} ++ ++/// An iterator over the parent commits of a commit. ++pub struct Parents<'commit, 'repo: 'commit> { ++ range: Range, ++ commit: &'commit Commit<'repo>, ++} ++ ++/// An iterator over the parent commits' ids of a commit. ++pub struct ParentIds<'commit> { ++ range: Range, ++ commit: &'commit Commit<'commit>, ++} ++ ++impl<'repo> Commit<'repo> { ++ /// Get the id (SHA1) of a repository commit ++ pub fn id(&self) -> Oid { ++ unsafe { Binding::from_raw(raw::git_commit_id(&*self.raw)) } ++ } ++ ++ /// Get the id of the tree pointed to by this commit. ++ /// ++ /// No attempts are made to fetch an object from the ODB. 
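Editor's note on the certificate types above: a sketch of inspecting an SSH hostkey during a fetch, assuming `RemoteCallbacks::certificate_check` in this version takes a `FnMut(&Cert, &str) -> bool` callback (its signature is not shown in this patch hunk).

    extern crate git2;

    use git2::{Cert, RemoteCallbacks};

    fn callbacks<'a>() -> RemoteCallbacks<'a> {
        let mut cbs = RemoteCallbacks::new();
        cbs.certificate_check(|cert: &Cert, host: &str| {
            // Only SSH connections carry a hostkey; X.509 certs pass through.
            if let Some(sha1) = cert.as_hostkey().and_then(|k| k.hash_sha1()) {
                println!("{} hostkey sha1: {:?}", host, &sha1[..]);
            }
            true // accept the certificate
        });
        cbs
    }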
++ pub fn tree_id(&self) -> Oid { ++ unsafe { Binding::from_raw(raw::git_commit_tree_id(&*self.raw)) } ++ } ++ ++ /// Get the tree pointed to by a commit. ++ pub fn tree(&self) -> Result, Error> { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_commit_tree(&mut ret, &*self.raw)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Get access to the underlying raw pointer. ++ pub fn raw(&self) -> *mut raw::git_commit { self.raw } ++ ++ /// Get the full message of a commit. ++ /// ++ /// The returned message will be slightly prettified by removing any ++ /// potential leading newlines. ++ /// ++ /// `None` will be returned if the message is not valid utf-8 ++ pub fn message(&self) -> Option<&str> { ++ str::from_utf8(self.message_bytes()).ok() ++ } ++ ++ /// Get the full message of a commit as a byte slice. ++ /// ++ /// The returned message will be slightly prettified by removing any ++ /// potential leading newlines. ++ pub fn message_bytes(&self) -> &[u8] { ++ unsafe { ++ ::opt_bytes(self, raw::git_commit_message(&*self.raw)).unwrap() ++ } ++ } ++ ++ /// Get the encoding for the message of a commit, as a string representing a ++ /// standard encoding name. ++ /// ++ /// `None` will be returned if the encoding is not known ++ pub fn message_encoding(&self) -> Option<&str> { ++ let bytes = unsafe { ++ ::opt_bytes(self, raw::git_commit_message(&*self.raw)) ++ }; ++ bytes.map(|b| str::from_utf8(b).unwrap()) ++ } ++ ++ /// Get the full raw message of a commit. ++ /// ++ /// `None` will be returned if the message is not valid utf-8 ++ pub fn message_raw(&self) -> Option<&str> { ++ str::from_utf8(self.message_raw_bytes()).ok() ++ } ++ ++ /// Get the full raw message of a commit. ++ pub fn message_raw_bytes(&self) -> &[u8] { ++ unsafe { ++ ::opt_bytes(self, raw::git_commit_message_raw(&*self.raw)).unwrap() ++ } ++ } ++ ++ /// Get the full raw text of the commit header. ++ /// ++ /// `None` will be returned if the message is not valid utf-8 ++ pub fn raw_header(&self) -> Option<&str> { ++ str::from_utf8(self.raw_header_bytes()).ok() ++ } ++ ++ /// Get the full raw text of the commit header. ++ pub fn raw_header_bytes(&self) -> &[u8] { ++ unsafe { ++ ::opt_bytes(self, raw::git_commit_raw_header(&*self.raw)).unwrap() ++ } ++ } ++ ++ /// Get the short "summary" of the git commit message. ++ /// ++ /// The returned message is the summary of the commit, comprising the first ++ /// paragraph of the message with whitespace trimmed and squashed. ++ /// ++ /// `None` may be returned if an error occurs or if the summary is not valid ++ /// utf-8. ++ pub fn summary(&self) -> Option<&str> { ++ self.summary_bytes().and_then(|s| str::from_utf8(s).ok()) ++ } ++ ++ /// Get the short "summary" of the git commit message. ++ /// ++ /// The returned message is the summary of the commit, comprising the first ++ /// paragraph of the message with whitespace trimmed and squashed. ++ /// ++ /// `None` may be returned if an error occurs ++ pub fn summary_bytes(&self) -> Option<&[u8]> { ++ unsafe { ::opt_bytes(self, raw::git_commit_summary(self.raw)) } ++ } ++ ++ /// Get the commit time (i.e. committer time) of a commit. ++ /// ++ /// The first element of the tuple is the time, in seconds, since the epoch. ++ /// The second element is the offset, in minutes, of the time zone of the ++ /// committer's preferred time zone. 
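Editor's note: a sketch tying together the accessors above; it mirrors the lookup pattern used in this module's tests (`head` → `target` → `find_commit`).

    extern crate git2;

    use git2::{Error, Repository};

    fn show_head_commit() -> Result<(), Error> {
        let repo = Repository::open(".")?;
        let oid = repo.head()?.target()
                      .ok_or(Error::from_str("HEAD is not a direct reference"))?;
        let commit = repo.find_commit(oid)?;
        // `summary()` is the squashed first paragraph; `message()` is the full text.
        println!("{} {}", commit.id(), commit.summary().unwrap_or("<non-utf8>"));
        println!("author: {:?}, parents: {}",
                 commit.author().name(), commit.parents().count());
        Ok(())
    }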
++ pub fn time(&self) -> Time { ++ unsafe { ++ Time::new(raw::git_commit_time(&*self.raw) as i64, ++ raw::git_commit_time_offset(&*self.raw) as i32) ++ } ++ } ++ ++ /// Creates a new iterator over the parents of this commit. ++ pub fn parents<'a>(&'a self) -> Parents<'a, 'repo> { ++ let max = unsafe { raw::git_commit_parentcount(&*self.raw) as usize }; ++ Parents { range: 0..max, commit: self } ++ } ++ ++ /// Creates a new iterator over the parents of this commit. ++ pub fn parent_ids(&self) -> ParentIds { ++ let max = unsafe { raw::git_commit_parentcount(&*self.raw) as usize }; ++ ParentIds { range: 0..max, commit: self } ++ } ++ ++ /// Get the author of this commit. ++ pub fn author(&self) -> Signature { ++ unsafe { ++ let ptr = raw::git_commit_author(&*self.raw); ++ signature::from_raw_const(self, ptr) ++ } ++ } ++ ++ /// Get the committer of this commit. ++ pub fn committer(&self) -> Signature { ++ unsafe { ++ let ptr = raw::git_commit_committer(&*self.raw); ++ signature::from_raw_const(self, ptr) ++ } ++ } ++ ++ /// Amend this existing commit with all non-`None` values ++ /// ++ /// This creates a new commit that is exactly the same as the old commit, ++ /// except that any non-`None` values will be updated. The new commit has ++ /// the same parents as the old commit. ++ /// ++ /// For information about `update_ref`, see [`Repository::commit`]. ++ /// ++ /// [`Repository::commit`]: struct.Repository.html#method.commit ++ pub fn amend(&self, ++ update_ref: Option<&str>, ++ author: Option<&Signature>, ++ committer: Option<&Signature>, ++ message_encoding: Option<&str>, ++ message: Option<&str>, ++ tree: Option<&Tree<'repo>>) -> Result { ++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ let update_ref = try!(::opt_cstr(update_ref)); ++ let encoding = try!(::opt_cstr(message_encoding)); ++ let message = try!(::opt_cstr(message)); ++ unsafe { ++ try_call!(raw::git_commit_amend(&mut raw, ++ self.raw(), ++ update_ref, ++ author.map(|s| s.raw()), ++ committer.map(|s| s.raw()), ++ encoding, ++ message, ++ tree.map(|t| t.raw()))); ++ Ok(Binding::from_raw(&raw as *const _)) ++ } ++ } ++ ++ /// Get the specified parent of the commit. ++ /// ++ /// Use the `parents` iterator to return an iterator over all parents. ++ pub fn parent(&self, i: usize) -> Result, Error> { ++ unsafe { ++ let mut raw = ptr::null_mut(); ++ try_call!(raw::git_commit_parent(&mut raw, &*self.raw, ++ i as libc::c_uint)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Get the specified parent id of the commit. ++ /// ++ /// This is different from `parent`, which will attempt to load the ++ /// parent commit from the ODB. ++ /// ++ /// Use the `parent_ids` iterator to return an iterator over all parents. 
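Editor's note: `amend` as exercised in the test at the end of this file — a sketch that rewords the current HEAD commit, keeping the author, committer, encoding, and tree by passing `None`.

    extern crate git2;

    use git2::{Error, Repository};

    fn reword_head(repo: &Repository, new_message: &str) -> Result<(), Error> {
        let oid = repo.head()?.target().ok_or(Error::from_str("no HEAD target"))?;
        let commit = repo.find_commit(oid)?;
        // Non-`None` fields are replaced; everything else is carried over.
        let new_oid = commit.amend(Some("HEAD"), None, None, None,
                                   Some(new_message), None)?;
        println!("HEAD is now {}", new_oid);
        Ok(())
    }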
++ pub fn parent_id(&self, i: usize) -> Result { ++ unsafe { ++ let id = raw::git_commit_parent_id(self.raw, i as libc::c_uint); ++ if id.is_null() { ++ Err(Error::from_str("parent index out of bounds")) ++ } else { ++ Ok(Binding::from_raw(id)) ++ } ++ } ++ } ++ ++ /// Casts this Commit to be usable as an `Object` ++ pub fn as_object(&self) -> &Object<'repo> { ++ unsafe { ++ &*(self as *const _ as *const Object<'repo>) ++ } ++ } ++ ++ /// Consumes Commit to be returned as an `Object` ++ pub fn into_object(self) -> Object<'repo> { ++ assert_eq!(mem::size_of_val(&self), mem::size_of::()); ++ unsafe { ++ mem::transmute(self) ++ } ++ } ++} ++ ++impl<'repo> Binding for Commit<'repo> { ++ type Raw = *mut raw::git_commit; ++ unsafe fn from_raw(raw: *mut raw::git_commit) -> Commit<'repo> { ++ Commit { ++ raw: raw, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *mut raw::git_commit { self.raw } ++} ++ ++impl<'repo> ::std::fmt::Debug for Commit<'repo> { ++ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { ++ let mut ds = f.debug_struct("Commit"); ++ ds.field("id", &self.id()); ++ if let Some(summary) = self.summary() { ++ ds.field("summary", &summary); ++ } ++ ds.finish() ++ } ++} ++ ++impl<'repo, 'commit> Iterator for Parents<'commit, 'repo> { ++ type Item = Commit<'repo>; ++ fn next(&mut self) -> Option> { ++ self.range.next().map(|i| self.commit.parent(i).unwrap()) ++ } ++ fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } ++} ++ ++impl<'repo, 'commit> DoubleEndedIterator for Parents<'commit, 'repo> { ++ fn next_back(&mut self) -> Option> { ++ self.range.next_back().map(|i| self.commit.parent(i).unwrap()) ++ } ++} ++ ++impl<'repo, 'commit> ExactSizeIterator for Parents<'commit, 'repo> {} ++ ++impl<'commit> Iterator for ParentIds<'commit> { ++ type Item = Oid; ++ fn next(&mut self) -> Option { ++ self.range.next().map(|i| self.commit.parent_id(i).unwrap()) ++ } ++ fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } ++} ++ ++impl<'commit> DoubleEndedIterator for ParentIds<'commit> { ++ fn next_back(&mut self) -> Option { ++ self.range.next_back().map(|i| self.commit.parent_id(i).unwrap()) ++ } ++} ++ ++impl<'commit> ExactSizeIterator for ParentIds<'commit> {} ++ ++impl<'repo> Clone for Commit<'repo> { ++ fn clone(&self) -> Self { ++ self.as_object().clone().into_commit().ok().unwrap() ++ } ++} ++ ++impl<'repo> Drop for Commit<'repo> { ++ fn drop(&mut self) { ++ unsafe { raw::git_commit_free(self.raw) } ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ #[test] ++ fn smoke() { ++ let (_td, repo) = ::test::repo_init(); ++ let head = repo.head().unwrap(); ++ let target = head.target().unwrap(); ++ let commit = repo.find_commit(target).unwrap(); ++ assert_eq!(commit.message(), Some("initial")); ++ assert_eq!(commit.id(), target); ++ commit.message_raw().unwrap(); ++ commit.raw_header().unwrap(); ++ commit.message_encoding(); ++ commit.summary().unwrap(); ++ commit.tree_id(); ++ commit.tree().unwrap(); ++ assert_eq!(commit.parents().count(), 0); ++ ++ assert_eq!(commit.author().name(), Some("name")); ++ assert_eq!(commit.author().email(), Some("email")); ++ assert_eq!(commit.committer().name(), Some("name")); ++ assert_eq!(commit.committer().email(), Some("email")); ++ ++ let sig = repo.signature().unwrap(); ++ let tree = repo.find_tree(commit.tree_id()).unwrap(); ++ let id = repo.commit(Some("HEAD"), &sig, &sig, "bar", &tree, ++ &[&commit]).unwrap(); ++ let head = repo.find_commit(id).unwrap(); ++ ++ let new_head = 
head.amend(Some("HEAD"), None, None, None, ++ Some("new message"), None).unwrap(); ++ let new_head = repo.find_commit(new_head).unwrap(); ++ assert_eq!(new_head.message(), Some("new message")); ++ new_head.into_object(); ++ ++ repo.find_object(target, None).unwrap().as_commit().unwrap(); ++ repo.find_object(target, None).unwrap().into_commit().ok().unwrap(); ++ } ++} ++ diff --cc vendor/git2-0.7.5/src/config.rs index 000000000,000000000..b01729aeb new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/config.rs @@@ -1,0 -1,0 +1,628 @@@ ++use std::ffi::CString; ++use std::marker; ++use std::path::{Path, PathBuf}; ++use std::ptr; ++use std::str; ++use libc; ++ ++use {raw, Error, ConfigLevel, Buf, IntoCString}; ++use util::{self, Binding}; ++ ++/// A structure representing a git configuration key/value store ++pub struct Config { ++ raw: *mut raw::git_config, ++} ++ ++/// A struct representing a certain entry owned by a `Config` instance. ++/// ++/// An entry has a name, a value, and a level it applies to. ++pub struct ConfigEntry<'cfg> { ++ raw: *mut raw::git_config_entry, ++ _marker: marker::PhantomData<&'cfg Config>, ++ owned: bool, ++} ++ ++/// An iterator over the `ConfigEntry` values of a `Config` structure. ++pub struct ConfigEntries<'cfg> { ++ raw: *mut raw::git_config_iterator, ++ _marker: marker::PhantomData<&'cfg Config>, ++} ++ ++impl Config { ++ /// Allocate a new configuration object ++ /// ++ /// This object is empty, so you have to add a file to it before you can do ++ /// anything with it. ++ pub fn new() -> Result { ++ ::init(); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_config_new(&mut raw)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Create a new config instance containing a single on-disk file ++ pub fn open(path: &Path) -> Result { ++ ::init(); ++ let mut raw = ptr::null_mut(); ++ let path = try!(path.into_c_string()); ++ unsafe { ++ try_call!(raw::git_config_open_ondisk(&mut raw, path)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Open the global, XDG and system configuration files ++ /// ++ /// Utility wrapper that finds the global, XDG and system configuration ++ /// files and opens them into a single prioritized config object that can ++ /// be used when accessing default config data outside a repository. ++ pub fn open_default() -> Result { ++ ::init(); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_config_open_default(&mut raw)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Locate the path to the global configuration file ++ /// ++ /// The user or global configuration file is usually located in ++ /// `$HOME/.gitconfig`. ++ /// ++ /// This method will try to guess the full path to that file, if the file ++ /// exists. The returned path may be used on any method call to load ++ /// the global configuration file. ++ /// ++ /// This method will not guess the path to the xdg compatible config file ++ /// (`.config/git/config`). 
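Editor's note: a minimal sketch of the constructors above; `get_string` is defined later in this file.

    extern crate git2;

    use git2::Config;

    fn user_name() -> Result<String, git2::Error> {
        // One prioritized view over the global, XDG and system files.
        let cfg = Config::open_default()?;
        cfg.get_string("user.name")
    }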
++ pub fn find_global() -> Result { ++ ::init(); ++ let buf = Buf::new(); ++ unsafe { try_call!(raw::git_config_find_global(buf.raw())); } ++ Ok(util::bytes2path(&buf).to_path_buf()) ++ } ++ ++ /// Locate the path to the system configuration file ++ /// ++ /// If /etc/gitconfig doesn't exist, it will look for %PROGRAMFILES% ++ pub fn find_system() -> Result { ++ ::init(); ++ let buf = Buf::new(); ++ unsafe { try_call!(raw::git_config_find_system(buf.raw())); } ++ Ok(util::bytes2path(&buf).to_path_buf()) ++ } ++ ++ /// Locate the path to the global xdg compatible configuration file ++ /// ++ /// The xdg compatible configuration file is usually located in ++ /// `$HOME/.config/git/config`. ++ pub fn find_xdg() -> Result { ++ ::init(); ++ let buf = Buf::new(); ++ unsafe { try_call!(raw::git_config_find_xdg(buf.raw())); } ++ Ok(util::bytes2path(&buf).to_path_buf()) ++ } ++ ++ /// Add an on-disk config file instance to an existing config ++ /// ++ /// The on-disk file pointed at by path will be opened and parsed; it's ++ /// expected to be a native Git config file following the default Git config ++ /// syntax (see man git-config). ++ /// ++ /// Further queries on this config object will access each of the config ++ /// file instances in order (instances with a higher priority level will be ++ /// accessed first). ++ pub fn add_file(&mut self, path: &Path, level: ConfigLevel, ++ force: bool) -> Result<(), Error> { ++ let path = try!(path.into_c_string()); ++ unsafe { ++ try_call!(raw::git_config_add_file_ondisk(self.raw, path, level, ++ ptr::null(), force)); ++ Ok(()) ++ } ++ } ++ ++ /// Delete a config variable from the config file with the highest level ++ /// (usually the local one). ++ pub fn remove(&mut self, name: &str) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ unsafe { ++ try_call!(raw::git_config_delete_entry(self.raw, name)); ++ Ok(()) ++ } ++ } ++ ++ /// Remove multivar config variables in the config file with the highest level (usually the ++ /// local one). ++ pub fn remove_multivar(&mut self, name: &str, regexp: &str) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ let regexp = try!(CString::new(regexp)); ++ unsafe { ++ try_call!(raw::git_config_delete_multivar(self.raw, name, regexp)); ++ } ++ Ok(()) ++ } ++ ++ /// Get the value of a boolean config variable. ++ /// ++ /// All config files will be looked into, in the order of their defined ++ /// level. A higher level means a higher priority. The first occurrence of ++ /// the variable will be returned here. ++ pub fn get_bool(&self, name: &str) -> Result { ++ let mut out = 0 as libc::c_int; ++ let name = try!(CString::new(name)); ++ unsafe { ++ try_call!(raw::git_config_get_bool(&mut out, &*self.raw, name)); ++ ++ } ++ Ok(!(out == 0)) ++ } ++ ++ /// Get the value of an integer config variable. ++ /// ++ /// All config files will be looked into, in the order of their defined ++ /// level. A higher level means a higher priority. The first occurrence of ++ /// the variable will be returned here. ++ pub fn get_i32(&self, name: &str) -> Result { ++ let mut out = 0i32; ++ let name = try!(CString::new(name)); ++ unsafe { ++ try_call!(raw::git_config_get_int32(&mut out, &*self.raw, name)); ++ ++ } ++ Ok(out) ++ } ++ ++ /// Get the value of an integer config variable. ++ /// ++ /// All config files will be looked into, in the order of their defined ++ /// level. A higher level means a higher priority. The first occurrence of ++ /// the variable will be returned here. 
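Editor's note: a sketch of building a layered config by hand with `add_file`; the two paths are hypothetical stand-ins for a system file and a repository-local file.

    extern crate git2;

    use std::path::Path;
    use git2::{Config, ConfigLevel};

    fn layered() -> Result<(), git2::Error> {
        let mut cfg = Config::new()?;
        cfg.add_file(Path::new("/etc/gitconfig"), ConfigLevel::System, false)?;
        cfg.add_file(Path::new(".git/config"), ConfigLevel::Local, false)?;
        // The higher-priority (local) level wins for duplicate keys.
        println!("bare? {}", cfg.get_bool("core.bare")?);
        Ok(())
    }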
++    pub fn get_i64(&self, name: &str) -> Result<i64, Error> {
++        let mut out = 0i64;
++        let name = try!(CString::new(name));
++        unsafe {
++            try_call!(raw::git_config_get_int64(&mut out, &*self.raw, name));
++        }
++        Ok(out)
++    }
++
++    /// Get the value of a string config variable.
++    ///
++    /// This is the same as `get_bytes` except that it may return `Err` if
++    /// the bytes are not valid utf-8.
++    pub fn get_str(&self, name: &str) -> Result<&str, Error> {
++        str::from_utf8(try!(self.get_bytes(name))).map_err(|_| {
++            Error::from_str("configuration value is not valid utf8")
++        })
++    }
++
++    /// Get the value of a string config variable as a byte slice.
++    ///
++    /// This method will return an error if this `Config` is not a snapshot.
++    pub fn get_bytes(&self, name: &str) -> Result<&[u8], Error> {
++        let mut ret = ptr::null();
++        let name = try!(CString::new(name));
++        unsafe {
++            try_call!(raw::git_config_get_string(&mut ret, &*self.raw, name));
++            Ok(::opt_bytes(self, ret).unwrap())
++        }
++    }
++
++    /// Get the value of a string config variable as an owned string.
++    ///
++    /// An error will be returned if the config value is not valid utf-8.
++    pub fn get_string(&self, name: &str) -> Result<String, Error> {
++        let ret = Buf::new();
++        let name = try!(CString::new(name));
++        unsafe {
++            try_call!(raw::git_config_get_string_buf(ret.raw(), self.raw, name));
++        }
++        str::from_utf8(&ret).map(|s| s.to_string()).map_err(|_| {
++            Error::from_str("configuration value is not valid utf8")
++        })
++    }
++
++    /// Get the value of a path config variable as an owned `PathBuf`.
++    pub fn get_path(&self, name: &str) -> Result<PathBuf, Error> {
++        let ret = Buf::new();
++        let name = try!(CString::new(name));
++        unsafe {
++            try_call!(raw::git_config_get_path(ret.raw(), self.raw, name));
++        }
++        Ok(::util::bytes2path(&ret).to_path_buf())
++    }
++
++    /// Get the ConfigEntry for a config variable.
++    pub fn get_entry(&self, name: &str) -> Result<ConfigEntry, Error> {
++        let mut ret = ptr::null_mut();
++        let name = try!(CString::new(name));
++        unsafe {
++            try_call!(raw::git_config_get_entry(&mut ret, self.raw, name));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Iterate over all the config variables
++    ///
++    /// If `glob` is `Some`, then the iterator will only iterate over all
++    /// variables whose name matches the pattern.
++    ///
++    /// # Example
++    ///
++    /// ```
++    /// # #![allow(unstable)]
++    /// use git2::Config;
++    ///
++    /// let cfg = Config::new().unwrap();
++    ///
++    /// for entry in &cfg.entries(None).unwrap() {
++    ///     let entry = entry.unwrap();
++    ///     println!("{} => {}", entry.name().unwrap(), entry.value().unwrap());
++    /// }
++    /// ```
++    pub fn entries(&self, glob: Option<&str>) -> Result<ConfigEntries, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            match glob {
++                Some(s) => {
++                    let s = try!(CString::new(s));
++                    try_call!(raw::git_config_iterator_glob_new(&mut ret,
++                                                                &*self.raw,
++                                                                s));
++                }
++                None => {
++                    try_call!(raw::git_config_iterator_new(&mut ret, &*self.raw));
++                }
++            }
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Open the global/XDG configuration file according to git's rules.
++    ///
++    /// Git allows you to store your global configuration at `$HOME/.config` or
++    /// `$XDG_CONFIG_HOME/git/config`. For backwards compatibility, the XDG file
++    /// shouldn't be used unless the user has created it explicitly. With this
++    /// function you'll open the correct one to write to.
++ pub fn open_global(&mut self) -> Result { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_config_open_global(&mut raw, self.raw)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Build a single-level focused config object from a multi-level one. ++ /// ++ /// The returned config object can be used to perform get/set/delete ++ /// operations on a single specific level. ++ pub fn open_level(&self, level: ConfigLevel) -> Result { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_config_open_level(&mut raw, &*self.raw, level)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Set the value of a boolean config variable in the config file with the ++ /// highest level (usually the local one). ++ pub fn set_bool(&mut self, name: &str, value: bool) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ unsafe { ++ try_call!(raw::git_config_set_bool(self.raw, name, value)); ++ } ++ Ok(()) ++ } ++ ++ /// Set the value of an integer config variable in the config file with the ++ /// highest level (usually the local one). ++ pub fn set_i32(&mut self, name: &str, value: i32) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ unsafe { ++ try_call!(raw::git_config_set_int32(self.raw, name, value)); ++ } ++ Ok(()) ++ } ++ ++ /// Set the value of an integer config variable in the config file with the ++ /// highest level (usually the local one). ++ pub fn set_i64(&mut self, name: &str, value: i64) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ unsafe { ++ try_call!(raw::git_config_set_int64(self.raw, name, value)); ++ } ++ Ok(()) ++ } ++ ++ /// Set the value of an multivar config variable in the config file with the ++ /// highest level (usually the local one). ++ pub fn set_multivar(&mut self, name: &str, regexp: &str, value: &str) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ let regexp = try!(CString::new(regexp)); ++ let value = try!(CString::new(value)); ++ unsafe { ++ try_call!(raw::git_config_set_multivar(self.raw, name, regexp, value)); ++ } ++ Ok(()) ++ } ++ ++ /// Set the value of a string config variable in the config file with the ++ /// highest level (usually the local one). ++ pub fn set_str(&mut self, name: &str, value: &str) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ let value = try!(CString::new(value)); ++ unsafe { ++ try_call!(raw::git_config_set_string(self.raw, name, value)); ++ } ++ Ok(()) ++ } ++ ++ /// Create a snapshot of the configuration ++ /// ++ /// Create a snapshot of the current state of a configuration, which allows ++ /// you to look into a consistent view of the configuration for looking up ++ /// complex values (e.g. a remote, submodule). ++ pub fn snapshot(&mut self) -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_config_snapshot(&mut ret, self.raw)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Parse a string as a bool. ++ /// Interprets "true", "yes", "on", 1, or any non-zero number as true. ++ /// Interprets "false", "no", "off", 0, or an empty string as false. ++ pub fn parse_bool(s: S) -> Result { ++ let s = try!(s.into_c_string()); ++ let mut out = 0; ++ ::init(); ++ unsafe { ++ try_call!(raw::git_config_parse_bool(&mut out, s)); ++ } ++ Ok(out != 0) ++ } ++ ++ /// Parse a string as an i32; handles suffixes like k, M, or G, and ++ /// multiplies by the appropriate power of 1024. 
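Editor's note: the setters plus `snapshot` in one sketch, following the pattern of the `persisted` test below; borrowing getters such as `get_str` are only valid on a snapshot.

    extern crate git2;

    use git2::Config;

    fn set_and_read(cfg: &mut Config) -> Result<(), git2::Error> {
        cfg.set_str("core.autocrlf", "input")?;
        cfg.set_bool("core.filemode", true)?;
        // A snapshot gives a consistent view for the borrowing getters.
        let snap = cfg.snapshot()?;
        assert_eq!(snap.get_str("core.autocrlf")?, "input");
        // Free helper: parse git-style booleans without a config file.
        assert_eq!(Config::parse_bool("on")?, true);
        Ok(())
    }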
++ pub fn parse_i32(s: S) -> Result { ++ let s = try!(s.into_c_string()); ++ let mut out = 0; ++ ::init(); ++ unsafe { ++ try_call!(raw::git_config_parse_int32(&mut out, s)); ++ } ++ Ok(out) ++ } ++ ++ /// Parse a string as an i64; handles suffixes like k, M, or G, and ++ /// multiplies by the appropriate power of 1024. ++ pub fn parse_i64(s: S) -> Result { ++ let s = try!(s.into_c_string()); ++ let mut out = 0; ++ ::init(); ++ unsafe { ++ try_call!(raw::git_config_parse_int64(&mut out, s)); ++ } ++ Ok(out) ++ } ++} ++ ++impl Binding for Config { ++ type Raw = *mut raw::git_config; ++ unsafe fn from_raw(raw: *mut raw::git_config) -> Config { ++ Config { raw: raw } ++ } ++ fn raw(&self) -> *mut raw::git_config { self.raw } ++} ++ ++impl Drop for Config { ++ fn drop(&mut self) { ++ unsafe { raw::git_config_free(self.raw) } ++ } ++} ++ ++impl<'cfg> ConfigEntry<'cfg> { ++ /// Gets the name of this entry. ++ /// ++ /// May return `None` if the name is not valid utf-8 ++ pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() } ++ ++ /// Gets the name of this entry as a byte slice. ++ pub fn name_bytes(&self) -> &[u8] { ++ unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() } ++ } ++ ++ /// Gets the value of this entry. ++ /// ++ /// May return `None` if the value is not valid utf-8 ++ pub fn value(&self) -> Option<&str> { str::from_utf8(self.value_bytes()).ok() } ++ ++ /// Gets the value of this entry as a byte slice. ++ pub fn value_bytes(&self) -> &[u8] { ++ unsafe { ::opt_bytes(self, (*self.raw).value).unwrap() } ++ } ++ ++ /// Gets the configuration level of this entry. ++ pub fn level(&self) -> ConfigLevel { ++ unsafe { ConfigLevel::from_raw((*self.raw).level) } ++ } ++ ++ /// Depth of includes where this variable was found ++ pub fn include_depth(&self) -> u32 { ++ unsafe { (*self.raw).include_depth as u32 } ++ } ++} ++ ++impl<'cfg> Binding for ConfigEntry<'cfg> { ++ type Raw = *mut raw::git_config_entry; ++ ++ unsafe fn from_raw(raw: *mut raw::git_config_entry) ++ -> ConfigEntry<'cfg> { ++ ConfigEntry { ++ raw: raw, ++ _marker: marker::PhantomData, ++ owned: true, ++ } ++ } ++ fn raw(&self) -> *mut raw::git_config_entry { self.raw } ++} ++ ++impl<'cfg> Binding for ConfigEntries<'cfg> { ++ type Raw = *mut raw::git_config_iterator; ++ ++ unsafe fn from_raw(raw: *mut raw::git_config_iterator) ++ -> ConfigEntries<'cfg> { ++ ConfigEntries { ++ raw: raw, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *mut raw::git_config_iterator { self.raw } ++} ++ ++// entries are only valid until the iterator is freed, so this impl is for ++// `&'b T` instead of `T` to have a lifetime to tie them to. ++// ++// It's also not implemented for `&'b mut T` so we can have multiple entries ++// (ok). 
++impl<'cfg, 'b> Iterator for &'b ConfigEntries<'cfg> { ++ type Item = Result, Error>; ++ fn next(&mut self) -> Option, Error>> { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call_iter!(raw::git_config_next(&mut raw, self.raw)); ++ Some(Ok(ConfigEntry { ++ owned: false, ++ raw: raw, ++ _marker: marker::PhantomData, ++ })) ++ } ++ } ++} ++ ++impl<'cfg> Drop for ConfigEntries<'cfg> { ++ fn drop(&mut self) { ++ unsafe { raw::git_config_iterator_free(self.raw) } ++ } ++} ++ ++impl<'cfg> Drop for ConfigEntry<'cfg> { ++ fn drop(&mut self) { ++ if self.owned { ++ unsafe { raw::git_config_entry_free(self.raw) } ++ } ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::fs::File; ++ use tempdir::TempDir; ++ ++ use Config; ++ ++ #[test] ++ fn smoke() { ++ let _cfg = Config::new().unwrap(); ++ let _ = Config::find_global(); ++ let _ = Config::find_system(); ++ let _ = Config::find_xdg(); ++ } ++ ++ #[test] ++ fn persisted() { ++ let td = TempDir::new("test").unwrap(); ++ let path = td.path().join("foo"); ++ File::create(&path).unwrap(); ++ ++ let mut cfg = Config::open(&path).unwrap(); ++ assert!(cfg.get_bool("foo.bar").is_err()); ++ cfg.set_bool("foo.k1", true).unwrap(); ++ cfg.set_i32("foo.k2", 1).unwrap(); ++ cfg.set_i64("foo.k3", 2).unwrap(); ++ cfg.set_str("foo.k4", "bar").unwrap(); ++ cfg.snapshot().unwrap(); ++ drop(cfg); ++ ++ let cfg = Config::open(&path).unwrap().snapshot().unwrap(); ++ assert_eq!(cfg.get_bool("foo.k1").unwrap(), true); ++ assert_eq!(cfg.get_i32("foo.k2").unwrap(), 1); ++ assert_eq!(cfg.get_i64("foo.k3").unwrap(), 2); ++ assert_eq!(cfg.get_str("foo.k4").unwrap(), "bar"); ++ ++ for entry in &cfg.entries(None).unwrap() { ++ let entry = entry.unwrap(); ++ entry.name(); ++ entry.value(); ++ entry.level(); ++ } ++ } ++ ++ #[test] ++ fn multivar() { ++ let td = TempDir::new("test").unwrap(); ++ let path = td.path().join("foo"); ++ File::create(&path).unwrap(); ++ ++ let mut cfg = Config::open(&path).unwrap(); ++ cfg.set_multivar("foo.bar", "^$", "baz").unwrap(); ++ cfg.set_multivar("foo.bar", "^$", "qux").unwrap(); ++ ++ let mut values: Vec = cfg.entries(None) ++ .unwrap() ++ .into_iter() ++ .map(|entry| entry.unwrap().value().unwrap().into()) ++ .collect(); ++ values.sort(); ++ assert_eq!(values, ["baz", "qux"]); ++ ++ cfg.remove_multivar("foo.bar", ".*").unwrap(); ++ ++ assert_eq!(cfg.entries(None).unwrap().count(), 0); ++ } ++ ++ #[test] ++ fn parse() { ++ assert_eq!(Config::parse_bool("").unwrap(), false); ++ assert_eq!(Config::parse_bool("false").unwrap(), false); ++ assert_eq!(Config::parse_bool("no").unwrap(), false); ++ assert_eq!(Config::parse_bool("off").unwrap(), false); ++ assert_eq!(Config::parse_bool("0").unwrap(), false); ++ ++ assert_eq!(Config::parse_bool("true").unwrap(), true); ++ assert_eq!(Config::parse_bool("yes").unwrap(), true); ++ assert_eq!(Config::parse_bool("on").unwrap(), true); ++ assert_eq!(Config::parse_bool("1").unwrap(), true); ++ assert_eq!(Config::parse_bool("42").unwrap(), true); ++ ++ assert!(Config::parse_bool(" ").is_err()); ++ assert!(Config::parse_bool("some-string").is_err()); ++ assert!(Config::parse_bool("-").is_err()); ++ ++ assert_eq!(Config::parse_i32("0").unwrap(), 0); ++ assert_eq!(Config::parse_i32("1").unwrap(), 1); ++ assert_eq!(Config::parse_i32("100").unwrap(), 100); ++ assert_eq!(Config::parse_i32("-1").unwrap(), -1); ++ assert_eq!(Config::parse_i32("-100").unwrap(), -100); ++ assert_eq!(Config::parse_i32("1k").unwrap(), 1024); ++ assert_eq!(Config::parse_i32("4k").unwrap(), 4096); ++ 
assert_eq!(Config::parse_i32("1M").unwrap(), 1048576); ++ assert_eq!(Config::parse_i32("1G").unwrap(), 1024*1024*1024); ++ ++ assert_eq!(Config::parse_i64("0").unwrap(), 0); ++ assert_eq!(Config::parse_i64("1").unwrap(), 1); ++ assert_eq!(Config::parse_i64("100").unwrap(), 100); ++ assert_eq!(Config::parse_i64("-1").unwrap(), -1); ++ assert_eq!(Config::parse_i64("-100").unwrap(), -100); ++ assert_eq!(Config::parse_i64("1k").unwrap(), 1024); ++ assert_eq!(Config::parse_i64("4k").unwrap(), 4096); ++ assert_eq!(Config::parse_i64("1M").unwrap(), 1048576); ++ assert_eq!(Config::parse_i64("1G").unwrap(), 1024*1024*1024); ++ assert_eq!(Config::parse_i64("100G").unwrap(), 100*1024*1024*1024); ++ } ++} diff --cc vendor/git2-0.7.5/src/cred.rs index 000000000,000000000..29d514885 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/cred.rs @@@ -1,0 -1,0 +1,577 @@@ ++use std::ffi::CString; ++use std::io::Write; ++use std::mem; ++use std::path::Path; ++use std::process::{Command, Stdio}; ++use std::ptr; ++use url; ++ ++use {raw, Error, Config, IntoCString}; ++use util::Binding; ++ ++/// A structure to represent git credentials in libgit2. ++pub struct Cred { ++ raw: *mut raw::git_cred, ++} ++ ++/// Management of the gitcredentials(7) interface. ++pub struct CredentialHelper { ++ /// A public field representing the currently discovered username from ++ /// configuration. ++ pub username: Option, ++ protocol: Option, ++ host: Option, ++ url: String, ++ commands: Vec, ++} ++ ++impl Cred { ++ /// Create a "default" credential usable for Negotiate mechanisms like NTLM ++ /// or Kerberos authentication. ++ pub fn default() -> Result { ++ ::init(); ++ let mut out = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_cred_default_new(&mut out)); ++ Ok(Binding::from_raw(out)) ++ } ++ } ++ ++ /// Create a new ssh key credential object used for querying an ssh-agent. ++ /// ++ /// The username specified is the username to authenticate. ++ pub fn ssh_key_from_agent(username: &str) -> Result { ++ ::init(); ++ let mut out = ptr::null_mut(); ++ let username = try!(CString::new(username)); ++ unsafe { ++ try_call!(raw::git_cred_ssh_key_from_agent(&mut out, username)); ++ Ok(Binding::from_raw(out)) ++ } ++ } ++ ++ /// Create a new passphrase-protected ssh key credential object. ++ pub fn ssh_key(username: &str, ++ publickey: Option<&Path>, ++ privatekey: &Path, ++ passphrase: Option<&str>) -> Result { ++ ::init(); ++ let username = try!(CString::new(username)); ++ let publickey = try!(::opt_cstr(publickey)); ++ let privatekey = try!(privatekey.into_c_string()); ++ let passphrase = try!(::opt_cstr(passphrase)); ++ let mut out = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_cred_ssh_key_new(&mut out, username, publickey, ++ privatekey, passphrase)); ++ Ok(Binding::from_raw(out)) ++ } ++ } ++ ++ /// Create a new ssh key credential object reading the keys from memory. ++ pub fn ssh_key_from_memory(username: &str, ++ publickey: Option<&str>, ++ privatekey: &str, ++ passphrase: Option<&str>) -> Result { ++ ::init(); ++ let username = try!(CString::new(username)); ++ let publickey = try!(::opt_cstr(publickey)); ++ let privatekey = try!(CString::new(privatekey)); ++ let passphrase = try!(::opt_cstr(passphrase)); ++ let mut out = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_cred_ssh_key_memory_new(&mut out, username, publickey, ++ privatekey, passphrase)); ++ Ok(Binding::from_raw(out)) ++ } ++ } ++ ++ /// Create a new plain-text username and password credential object. 
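Editor's note: how `Cred::ssh_key_from_agent` above is typically wired up, assuming `RemoteCallbacks::credentials` takes a `FnMut(&str, Option<&str>, CredentialType) -> Result<Cred, Error>` callback and that `SSH_KEY` is an associated constant of `CredentialType` in this version.

    extern crate git2;

    use git2::{Cred, CredentialType, RemoteCallbacks};

    fn callbacks<'a>() -> RemoteCallbacks<'a> {
        let mut cbs = RemoteCallbacks::new();
        cbs.credentials(|_url, username_from_url, allowed| {
            if allowed.contains(CredentialType::SSH_KEY) {
                // Defer key handling to a running ssh-agent.
                Cred::ssh_key_from_agent(username_from_url.unwrap_or("git"))
            } else {
                Err(git2::Error::from_str("no supported credential type"))
            }
        });
        cbs
    }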
++    pub fn userpass_plaintext(username: &str,
++                              password: &str) -> Result<Cred, Error> {
++        ::init();
++        let username = try!(CString::new(username));
++        let password = try!(CString::new(password));
++        let mut out = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_cred_userpass_plaintext_new(&mut out, username,
++                                                           password));
++            Ok(Binding::from_raw(out))
++        }
++    }
++
++    /// Attempt to read `credential.helper` according to gitcredentials(7) [1]
++    ///
++    /// This function will attempt to parse the user's `credential.helper`
++    /// configuration, invoke the necessary processes, and read off what the
++    /// username/password should be for a particular url.
++    ///
++    /// The returned credential type will be a username/password credential if
++    /// successful.
++    ///
++    /// [1]: https://www.kernel.org/pub/software/scm/git/docs/gitcredentials.html
++    pub fn credential_helper(config: &Config,
++                             url: &str,
++                             username: Option<&str>)
++                             -> Result<Cred, Error> {
++        match CredentialHelper::new(url).config(config).username(username)
++                                        .execute() {
++            Some((username, password)) => {
++                Cred::userpass_plaintext(&username, &password)
++            }
++            None => Err(Error::from_str("failed to acquire username/password \
++                                         from local configuration"))
++        }
++    }
++
++    /// Create a credential to specify a username.
++    ///
++    /// This is used with ssh authentication to query for the username if none
++    /// is specified in the url.
++    pub fn username(username: &str) -> Result<Cred, Error> {
++        ::init();
++        let username = try!(CString::new(username));
++        let mut out = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_cred_username_new(&mut out, username));
++            Ok(Binding::from_raw(out))
++        }
++    }
++
++    /// Check whether a credential object contains username information.
++    pub fn has_username(&self) -> bool {
++        unsafe { raw::git_cred_has_username(self.raw) == 1 }
++    }
++
++    /// Return the type of credentials that this object represents.
++    pub fn credtype(&self) -> raw::git_credtype_t {
++        unsafe { (*self.raw).credtype }
++    }
++
++    /// Unwrap access to the underlying raw pointer, canceling the destructor
++    pub unsafe fn unwrap(mut self) -> *mut raw::git_cred {
++        mem::replace(&mut self.raw, ptr::null_mut())
++    }
++}
++
++impl Binding for Cred {
++    type Raw = *mut raw::git_cred;
++
++    unsafe fn from_raw(raw: *mut raw::git_cred) -> Cred {
++        Cred { raw: raw }
++    }
++    fn raw(&self) -> *mut raw::git_cred { self.raw }
++}
++
++impl Drop for Cred {
++    fn drop(&mut self) {
++        if !self.raw.is_null() {
++            unsafe { ((*self.raw).free)(self.raw) }
++        }
++    }
++}
++
++impl CredentialHelper {
++    /// Create a new credential helper object which will be used to probe git's
++    /// local credential configuration.
++    ///
++    /// The url specified is the namespace on which this will query credentials.
++    /// Invalid urls are currently ignored.
++    pub fn new(url: &str) -> CredentialHelper {
++        let mut ret = CredentialHelper {
++            protocol: None,
++            host: None,
++            username: None,
++            url: url.to_string(),
++            commands: Vec::new(),
++        };
++
++        // Parse out the (protocol, host) if one is available
++        if let Ok(url) = url::Url::parse(url) {
++            if let Some(url::Host::Domain(s)) = url.host() {
++                ret.host = Some(s.to_string());
++            }
++            ret.protocol = Some(url.scheme().to_string())
++        }
++        ret
++    }
++
++    /// Set the username that this credential helper will query with.
++    ///
++    /// By default the username is `None`.
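++    ///
++    /// # Example
++    ///
++    /// A rough sketch of the builder-style flow this type supports (the url
++    /// and username below are made up for illustration):
++    ///
++    /// ```no_run
++    /// use git2::CredentialHelper;
++    ///
++    /// let cfg = git2::Config::open_default().unwrap();
++    /// if let Some((user, pass)) = CredentialHelper::new("https://example.com/repo.git")
++    ///     .config(&cfg)
++    ///     .username(Some("someuser"))
++    ///     .execute() {
++    ///     println!("discovered {}:{}", user, pass);
++    /// }
++    /// ```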
++    pub fn username(&mut self, username: Option<&str>) -> &mut CredentialHelper {
++        self.username = username.map(|s| s.to_string());
++        self
++    }
++
++    /// Query the specified configuration object to discover commands to
++    /// execute, usernames to query, etc.
++    pub fn config(&mut self, config: &Config) -> &mut CredentialHelper {
++        // Figure out the configured username/helper program.
++        //
++        // see http://git-scm.com/docs/gitcredentials.html#_configuration_options
++        //
++        // TODO: implement useHttpPath
++        if self.username.is_none() {
++            self.config_username(config);
++        }
++        self.config_helper(config);
++        self
++    }
++
++    // Configure the queried username from `config`
++    fn config_username(&mut self, config: &Config) {
++        let key = self.exact_key("username");
++        self.username = config.get_string(&key).ok().or_else(|| {
++            self.url_key("username").and_then(|s| {
++                config.get_string(&s).ok()
++            })
++        }).or_else(|| {
++            config.get_string("credential.username").ok()
++        })
++    }
++
++    // Discover all `helper` directives from `config`
++    fn config_helper(&mut self, config: &Config) {
++        let exact = config.get_string(&self.exact_key("helper"));
++        self.add_command(exact.as_ref().ok().map(|s| &s[..]));
++        if let Some(key) = self.url_key("helper") {
++            let url = config.get_string(&key);
++            self.add_command(url.as_ref().ok().map(|s| &s[..]));
++        }
++        let global = config.get_string("credential.helper");
++        self.add_command(global.as_ref().ok().map(|s| &s[..]));
++    }
++
++    // Add a `helper` configured command to the list of commands to execute.
++    //
++    // see https://www.kernel.org/pub/software/scm/git/docs/technical
++    //     /api-credentials.html#_credential_helpers
++    fn add_command(&mut self, cmd: Option<&str>) {
++        let cmd = match cmd {
++            Some("") | None => return,
++            Some(s) => s,
++        };
++
++        if cmd.starts_with('!') {
++            self.commands.push(cmd[1..].to_string());
++        } else if cmd.starts_with('/') || cmd.starts_with('\\') ||
++                  cmd[1..].starts_with(":\\") {
++            self.commands.push(format!("\"{}\"", cmd));
++        } else {
++            self.commands.push(format!("git credential-{}", cmd));
++        }
++    }
++
++    fn exact_key(&self, name: &str) -> String {
++        format!("credential.{}.{}", self.url, name)
++    }
++
++    fn url_key(&self, name: &str) -> Option<String> {
++        match (&self.host, &self.protocol) {
++            (&Some(ref host), &Some(ref protocol)) => {
++                Some(format!("credential.{}://{}.{}", protocol, host, name))
++            }
++            _ => None
++        }
++    }
++
++    /// Execute this helper, attempting to discover a username/password pair.
++    ///
++    /// All I/O errors are ignored (to match git behavior), and this function
++    /// only succeeds if both a username and a password were found
++    pub fn execute(&self) -> Option<(String, String)> {
++        let mut username = self.username.clone();
++        let mut password = None;
++        for cmd in &self.commands {
++            let (u, p) = self.execute_cmd(cmd, &username);
++            if u.is_some() && username.is_none() {
++                username = u;
++            }
++            if p.is_some() && password.is_none() {
++                password = p;
++            }
++            if username.is_some() && password.is_some() { break }
++        }
++
++        match (username, password) {
++            (Some(u), Some(p)) => Some((u, p)),
++            _ => None,
++        }
++    }
++
++    // Execute the given `cmd`, providing the appropriate variables on stdin and
++    // then afterwards parsing the output into the username/password on stdout.
++    fn execute_cmd(&self, cmd: &str, username: &Option<String>)
++                   -> (Option<String>, Option<String>) {
++        macro_rules! my_try( ($e:expr) => (
++            match $e {
++                Ok(e) => e,
++                Err(e) => {
++                    debug!("{} failed with {}", stringify!($e), e);
++                    return (None, None)
++                }
++            }
++        ) );
++
++        // It looks like the `cmd` specification is typically bourne-shell-like
++        // syntax, so try that first. If that fails, though, we may be on a
++        // Windows machine for example where `sh` isn't actually available by
++        // default. Most credential helper configurations though are pretty
++        // simple (aka one or two space-separated strings) so also try to invoke
++        // the process directly.
++        //
++        // If that fails then it's up to the user to put `sh` in path and make
++        // sure it works.
++        let mut c = Command::new("sh");
++        c.arg("-c")
++         .arg(&format!("{} get", cmd))
++         .stdin(Stdio::piped())
++         .stdout(Stdio::piped())
++         .stderr(Stdio::piped());
++        let mut p = match c.spawn() {
++            Ok(p) => p,
++            Err(e) => {
++                debug!("`sh` failed to spawn: {}", e);
++                let mut parts = cmd.split_whitespace();
++                let mut c = Command::new(parts.next().unwrap());
++                for arg in parts {
++                    c.arg(arg);
++                }
++                c.arg("get")
++                 .stdin(Stdio::piped())
++                 .stdout(Stdio::piped())
++                 .stderr(Stdio::piped());
++                match c.spawn() {
++                    Ok(p) => p,
++                    Err(e) => {
++                        debug!("fallback of {:?} failed with {}", cmd, e);
++                        return (None, None);
++                    }
++                }
++            }
++        };
++
++        // Ignore write errors as the command may not actually be listening for
++        // stdin
++        {
++            let stdin = p.stdin.as_mut().unwrap();
++            if let Some(ref p) = self.protocol {
++                let _ = writeln!(stdin, "protocol={}", p);
++            }
++            if let Some(ref p) = self.host {
++                let _ = writeln!(stdin, "host={}", p);
++            }
++            if let Some(ref p) = *username {
++                let _ = writeln!(stdin, "username={}", p);
++            }
++        }
++        let output = my_try!(p.wait_with_output());
++        if !output.status.success() {
++            debug!("credential helper failed: {}\nstdout ---\n{}\nstderr ---\n{}",
++                   output.status,
++                   String::from_utf8_lossy(&output.stdout),
++                   String::from_utf8_lossy(&output.stderr));
++            return (None, None)
++        }
++        trace!("credential helper stderr ---\n{}",
++               String::from_utf8_lossy(&output.stderr));
++        self.parse_output(output.stdout)
++    }
++
++    // Parse the output of a command into the username/password found
++    fn parse_output(&self, output: Vec<u8>) -> (Option<String>, Option<String>) {
++        // Parse the output of the command, looking for username/password
++        let mut username = None;
++        let mut password = None;
++        for line in output.split(|t| *t == b'\n') {
++            let mut parts = line.splitn(2, |t| *t == b'=');
++            let key = parts.next().unwrap();
++            let value = match parts.next() {
++                Some(s) => s,
++                None => {
++                    trace!("ignoring output line: {}", String::from_utf8_lossy(line));
++                    continue
++                }
++            };
++            let value = match String::from_utf8(value.to_vec()) {
++                Ok(s) => s,
++                Err(..) => continue,
++            };
++            match key {
++                b"username" => username = Some(value),
++                b"password" => password = Some(value),
++                _ => {}
++            }
++        }
++        (username, password)
++    }
++}
++
++#[cfg(all(test, feature = "unstable"))]
++mod test {
++    use std::env;
++    use std::fs::File;
++    use std::io::prelude::*;
++    use std::path::Path;
++    use tempdir::TempDir;
++
++    use {Cred, Config, CredentialHelper, ConfigLevel};
++
++    macro_rules!
cfg( ($($k:expr => $v:expr),*) => ({ ++ let td = TempDir::new("git2-rs").unwrap(); ++ let mut cfg = Config::new().unwrap(); ++ cfg.add_file(&td.path().join("cfg"), ConfigLevel::Highest, false).unwrap(); ++ $(cfg.set_str($k, $v).unwrap();)* ++ cfg ++ }) ); ++ ++ #[test] ++ fn smoke() { ++ Cred::default().unwrap(); ++ } ++ ++ #[test] ++ fn credential_helper1() { ++ let cfg = cfg! { ++ "credential.helper" => "!f() { echo username=a; echo password=b; }; f" ++ }; ++ let (u, p) = CredentialHelper::new("https://example.com/foo/bar") ++ .config(&cfg) ++ .execute().unwrap(); ++ assert_eq!(u, "a"); ++ assert_eq!(p, "b"); ++ } ++ ++ #[test] ++ fn credential_helper2() { ++ let cfg = cfg! {}; ++ assert!(CredentialHelper::new("https://example.com/foo/bar") ++ .config(&cfg) ++ .execute().is_none()); ++ } ++ ++ #[test] ++ fn credential_helper3() { ++ let cfg = cfg! { ++ "credential.https://example.com.helper" => ++ "!f() { echo username=c; }; f", ++ "credential.helper" => "!f() { echo username=a; echo password=b; }; f" ++ }; ++ let (u, p) = CredentialHelper::new("https://example.com/foo/bar") ++ .config(&cfg) ++ .execute().unwrap(); ++ assert_eq!(u, "c"); ++ assert_eq!(p, "b"); ++ } ++ ++ #[test] ++ fn credential_helper4() { ++ let td = TempDir::new("git2-rs").unwrap(); ++ let path = td.path().join("script"); ++ File::create(&path).unwrap().write(br"\ ++#!/bin/sh ++echo username=c ++").unwrap(); ++ chmod(&path); ++ let cfg = cfg! { ++ "credential.https://example.com.helper" => ++ &path.display().to_string()[..], ++ "credential.helper" => "!f() { echo username=a; echo password=b; }; f" ++ }; ++ let (u, p) = CredentialHelper::new("https://example.com/foo/bar") ++ .config(&cfg) ++ .execute().unwrap(); ++ assert_eq!(u, "c"); ++ assert_eq!(p, "b"); ++ } ++ ++ #[test] ++ fn credential_helper5() { ++ let td = TempDir::new("git2-rs").unwrap(); ++ let path = td.path().join("git-credential-script"); ++ File::create(&path).unwrap().write(br"\ ++#!/bin/sh ++echo username=c ++").unwrap(); ++ chmod(&path); ++ ++ let paths = env::var("PATH").unwrap(); ++ let paths = env::split_paths(&paths) ++ .chain(path.parent().map(|p| p.to_path_buf()).into_iter()); ++ env::set_var("PATH", &env::join_paths(paths).unwrap()); ++ ++ let cfg = cfg! { ++ "credential.https://example.com.helper" => "script", ++ "credential.helper" => "!f() { echo username=a; echo password=b; }; f" ++ }; ++ let (u, p) = CredentialHelper::new("https://example.com/foo/bar") ++ .config(&cfg) ++ .execute().unwrap(); ++ assert_eq!(u, "c"); ++ assert_eq!(p, "b"); ++ } ++ ++ #[test] ++ fn credential_helper6() { ++ let cfg = cfg! 
{ ++ "credential.helper" => "" ++ }; ++ assert!(CredentialHelper::new("https://example.com/foo/bar") ++ .config(&cfg) ++ .execute().is_none()); ++ } ++ ++ #[test] ++ fn ssh_key_from_memory() { ++ let cred = Cred::ssh_key_from_memory( ++ "test", ++ Some("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDByAO8uj+kXicj6C2ODMspgmUoVyl5eaw8vR6a1yEnFuJFzevabNlN6Ut+CPT3TRnYk5BW73pyXBtnSL2X95BOnbjMDXc4YIkgs3YYHWnxbqsD4Pj/RoGqhf+gwhOBtL0poh8tT8WqXZYxdJQKLQC7oBqf3ykCEYulE4oeRUmNh4IzEE+skD/zDkaJ+S1HRD8D8YCiTO01qQnSmoDFdmIZTi8MS8Cw+O/Qhym1271ThMlhD6PubSYJXfE6rVbE7A9RzH73A6MmKBlzK8VTb4SlNSrr/DOk+L0uq+wPkv+pm+D9WtxoqQ9yl6FaK1cPawa3+7yRNle3m+72KCtyMkQv"), ++ r#" ++ -----BEGIN RSA PRIVATE KEY----- ++ Proc-Type: 4,ENCRYPTED ++ DEK-Info: AES-128-CBC,818C7722D3B01F2161C2ACF6A5BBAAE8 ++ ++ 3Cht4QB3PcoQ0I55j1B3m2ZzIC/mrh+K5nQeA1Vy2GBTMyM7yqGHqTOv7qLhJscd ++ H+cB0Pm6yCr3lYuNrcKWOCUto+91P7ikyARruHVwyIxKdNx15uNulOzQJHQWNbA4 ++ RQHlhjON4atVo2FyJ6n+ujK6QiBg2PR5Vbbw/AtV6zBCFW3PhzDn+qqmHjpBFqj2 ++ vZUUe+MkDQcaF5J45XMHahhSdo/uKCDhfbylExp/+ACWkvxdPpsvcARM6X434ucD ++ aPY+4i0/JyLkdbm0GFN9/q3i53qf4kCBhojFl4AYJdGI0AzAgbdTXZ7EJHbAGZHS ++ os5K0oTwDVXMI0sSE2I/qHxaZZsDP1dOKq6di6SFPUp8liYimm7rNintRX88Gl2L ++ g1ko9abp/NlgD0YY/3mad+NNAISDL/YfXq2fklH3En3/7ZrOVZFKfZXwQwas5g+p ++ VQPKi3+ae74iOjLyuPDSc1ePmhUNYeP+9rLSc0wiaiHqls+2blPPDxAGMEo63kbz ++ YPVjdmuVX4VWnyEsfTxxJdFDYGSNh6rlrrO1RFrex7kJvpg5gTX4M/FT8TfCd7Hn ++ M6adXsLMqwu5tz8FuDmAtVdq8zdSrgZeAbpJ9D3EDOmZ70xz4XBL19ImxDp+Qqs2 ++ kQX7kobRzeeP2URfRoGr7XZikQWyQ2UASfPcQULY8R58QoZWWsQ4w51GZHg7TDnw ++ 1DRo/0OgkK7Gqf215nFmMpB4uyi58cq3WFwWQa1IqslkObpVgBQZcNZb/hKUYPGk ++ g4zehfIgAfCdnQHwZvQ6Fdzhcs3SZeO+zVyuiZN3Gsi9HU0/1vpAKiuuOzcG02vF ++ b6Y6hwsAA9yphF3atI+ARD4ZwXdDfzuGb3yJglMT3Fr/xuLwAvdchRo1spANKA0E ++ tT5okLrK0H4wnHvf2SniVVWRhmJis0lQo9LjGGwRIdsPpVnJSDvaISIVF+fHT90r ++ HvxN8zXI93x9jcPtwp7puQ1C7ehKJK10sZ71OLIZeuUgwt+5DRunqg6evPco9Go7 ++ UOGwcVhLY200KT+1k7zWzCS0yVQp2HRm6cxsZXAp4ClBSwIx15eIoLIrjZdJRjCq ++ COp6pZx1fnvJ9ERIvl5hon+Ty+renMcFKz2HmchC7egpcqIxW9Dsv6zjhHle6pxb ++ 37GaEKHF2KA3RN+dSV/K8n+C9Yent5tx5Y9a/pMcgRGtgu+G+nyFmkPKn5Zt39yX ++ qDpyM0LtbRVZPs+MgiqoGIwYc/ujoCq7GL38gezsBQoHaTt79yYBqCp6UR0LMuZ5 ++ f/7CtWqffgySfJ/0wjGidDAumDv8CK45AURpL/Z+tbFG3M9ar/LZz/Y6EyBcLtGY ++ Wwb4zs8zXIA0qHrjNTnPqHDvezziArYfgPjxCIHMZzms9Yn8+N02p39uIytqg434 ++ BAlCqZ7GYdDFfTpWIwX+segTK9ux0KdBqcQv+9Fwwjkq9KySnRKqNl7ZJcefFZJq ++ c6PA1iinZWBjuaO1HKx3PFulrl0bcpR9Kud1ZIyfnh5rwYN8UQkkcR/wZPla04TY ++ 8l5dq/LI/3G5sZXwUHKOcuQWTj7Saq7Q6gkKoMfqt0wC5bpZ1m17GHPoMz6GtX9O ++ -----END RSA PRIVATE KEY----- ++ "#, ++ Some("test123")); ++ assert!(cred.is_ok()); ++ } ++ ++ ++ #[cfg(unix)] ++ fn chmod(path: &Path) { ++ use std::os::unix::prelude::*; ++ use std::fs; ++ let mut perms = fs::metadata(path).unwrap().permissions(); ++ perms.set_mode(0o755); ++ fs::set_permissions(path, perms).unwrap(); ++ } ++ #[cfg(windows)] ++ fn chmod(_path: &Path) {} ++} diff --cc vendor/git2-0.7.5/src/describe.rs index 000000000,000000000..c06208871 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/describe.rs @@@ -1,0 -1,0 +1,199 @@@ ++use std::marker; ++use std::mem; ++use std::ffi::CString; ++use std::ptr; ++ ++use libc::{c_uint, c_int}; ++ ++use {raw, Repository, Error, Buf}; ++use util::Binding; ++ ++/// The result of a `describe` operation on either an `Describe` or a ++/// `Repository`. ++pub struct Describe<'repo> { ++ raw: *mut raw::git_describe_result, ++ _marker: marker::PhantomData<&'repo Repository>, ++} ++ ++/// Options which indicate how a `Describe` is created. 
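++///
++/// # Example
++///
++/// A small sketch of configuring a describe operation (assumes an existing
++/// `Repository` value named `repo`):
++///
++/// ```no_run
++/// use git2::DescribeOptions;
++///
++/// # let repo = git2::Repository::open(".").unwrap();
++/// let mut opts = DescribeOptions::new();
++/// opts.describe_tags().show_commit_oid_as_fallback(true);
++/// let describe = repo.describe(&opts).unwrap();
++/// println!("{}", describe.format(None).unwrap());
++/// ```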
++pub struct DescribeOptions {
++    raw: raw::git_describe_options,
++    pattern: CString,
++}
++
++/// Options which can be used to customize how a description is formatted.
++pub struct DescribeFormatOptions {
++    raw: raw::git_describe_format_options,
++    dirty_suffix: CString,
++}
++
++impl<'repo> Describe<'repo> {
++    /// Prints this describe result, returning the result as a string.
++    pub fn format(&self, opts: Option<&DescribeFormatOptions>)
++                  -> Result<String, Error> {
++        let buf = Buf::new();
++        let opts = opts.map(|o| &o.raw as *const _).unwrap_or(ptr::null());
++        unsafe {
++            try_call!(raw::git_describe_format(buf.raw(), self.raw, opts));
++        }
++        Ok(String::from_utf8(buf.to_vec()).unwrap())
++    }
++}
++
++impl<'repo> Binding for Describe<'repo> {
++    type Raw = *mut raw::git_describe_result;
++
++    unsafe fn from_raw(raw: *mut raw::git_describe_result) -> Describe<'repo> {
++        Describe { raw: raw, _marker: marker::PhantomData, }
++    }
++    fn raw(&self) -> *mut raw::git_describe_result { self.raw }
++}
++
++impl<'repo> Drop for Describe<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_describe_result_free(self.raw) }
++    }
++}
++
++impl Default for DescribeFormatOptions {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl DescribeFormatOptions {
++    /// Creates a new blank set of formatting options for a description.
++    pub fn new() -> DescribeFormatOptions {
++        let mut opts = DescribeFormatOptions {
++            raw: unsafe { mem::zeroed() },
++            dirty_suffix: CString::new(Vec::new()).unwrap(),
++        };
++        opts.raw.version = 1;
++        opts.raw.abbreviated_size = 7;
++        opts
++    }
++
++    /// Sets the size of the abbreviated commit id to use.
++    ///
++    /// The value is the lower bound for the length of the abbreviated string,
++    /// and the default is 7.
++    pub fn abbreviated_size(&mut self, size: u32) -> &mut Self {
++        self.raw.abbreviated_size = size as c_uint;
++        self
++    }
++
++    /// Sets whether or not the long format is used even when a shorter name
++    /// could be used.
++    pub fn always_use_long_format(&mut self, long: bool) -> &mut Self {
++        self.raw.always_use_long_format = long as c_int;
++        self
++    }
++
++    /// If the workdir is dirty and this is set, this string will be appended to
++    /// the description string.
++    pub fn dirty_suffix(&mut self, suffix: &str) -> &mut Self {
++        self.dirty_suffix = CString::new(suffix).unwrap();
++        self.raw.dirty_suffix = self.dirty_suffix.as_ptr();
++        self
++    }
++}
++
++impl Default for DescribeOptions {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl DescribeOptions {
++    /// Creates a new blank set of formatting options for a description.
++    pub fn new() -> DescribeOptions {
++        let mut opts = DescribeOptions {
++            raw: unsafe { mem::zeroed() },
++            pattern: CString::new(Vec::new()).unwrap(),
++        };
++        opts.raw.version = 1;
++        opts.raw.max_candidates_tags = 10;
++        opts
++    }
++
++    #[allow(missing_docs)]
++    pub fn max_candidates_tags(&mut self, max: u32) -> &mut Self {
++        self.raw.max_candidates_tags = max as c_uint;
++        self
++    }
++
++    /// Sets the reference lookup strategy
++    ///
++    /// This behaves like the `--tags` option to git-describe.
++    pub fn describe_tags(&mut self) -> &mut Self {
++        self.raw.describe_strategy = raw::GIT_DESCRIBE_TAGS as c_uint;
++        self
++    }
++
++    /// Sets the reference lookup strategy
++    ///
++    /// This behaves like the `--all` option to git-describe.
++    pub fn describe_all(&mut self) -> &mut Self {
++        self.raw.describe_strategy = raw::GIT_DESCRIBE_ALL as c_uint;
++        self
++    }
++
++    /// Indicates when calculating the distance from the matching tag or
++    /// reference whether to only walk down the first-parent ancestry.
++    pub fn only_follow_first_parent(&mut self, follow: bool) -> &mut Self {
++        self.raw.only_follow_first_parent = follow as c_int;
++        self
++    }
++
++    /// If no matching tag or reference is found whether a describe option would
++    /// normally fail. This option indicates, however, that it will instead fall
++    /// back to showing the full id of the commit.
++    pub fn show_commit_oid_as_fallback(&mut self, show: bool) -> &mut Self {
++        self.raw.show_commit_oid_as_fallback = show as c_int;
++        self
++    }
++
++    #[allow(missing_docs)]
++    pub fn pattern(&mut self, pattern: &str) -> &mut Self {
++        self.pattern = CString::new(pattern).unwrap();
++        self.raw.pattern = self.pattern.as_ptr();
++        self
++    }
++}
++
++impl Binding for DescribeOptions {
++    type Raw = *mut raw::git_describe_options;
++
++    unsafe fn from_raw(_raw: *mut raw::git_describe_options)
++                       -> DescribeOptions {
++        panic!("unimplemented")
++    }
++    fn raw(&self) -> *mut raw::git_describe_options {
++        &self.raw as *const _ as *mut _
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use DescribeOptions;
++
++    #[test]
++    fn smoke() {
++        let (_td, repo) = ::test::repo_init();
++        let head = t!(repo.head()).target().unwrap();
++
++        let d = t!(repo.describe(DescribeOptions::new()
++                                 .show_commit_oid_as_fallback(true)));
++        let id = head.to_string();
++        assert_eq!(t!(d.format(None)), &id[..7]);
++
++        let obj = t!(repo.find_object(head, None));
++        let sig = t!(repo.signature());
++        t!(repo.tag("foo", &obj, &sig, "message", true));
++        let d = t!(repo.describe(&DescribeOptions::new()));
++        assert_eq!(t!(d.format(None)), "foo");
++
++        let d = t!(obj.describe(&DescribeOptions::new()));
++        assert_eq!(t!(d.format(None)), "foo");
++    }
++}
diff --cc vendor/git2-0.7.5/src/diff.rs
index 000000000,000000000..dffd6117c
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/diff.rs
@@@ -1,0 -1,0 +1,1258 @@@
++use std::ffi::CString;
++use std::marker;
++use std::mem;
++use std::ops::Range;
++use std::path::Path;
++use std::ptr;
++use std::slice;
++use libc::{c_char, size_t, c_void, c_int};
++
++use {raw, panic, Buf, Delta, Oid, Repository, Error, DiffFormat};
++use {DiffStatsFormat, IntoCString};
++use util::{self, Binding};
++
++/// The diff object that contains all individual file deltas.
++///
++/// This is an opaque structure which will be allocated by one of the diff
++/// generator functions on the `Repository` structure (e.g. `diff_tree_to_tree`
++/// or other `diff_*` functions).
++pub struct Diff<'repo> {
++    raw: *mut raw::git_diff,
++    _marker: marker::PhantomData<&'repo Repository>,
++}
++
++unsafe impl<'repo> Send for Diff<'repo> {}
++
++/// Description of changes to one entry.
++pub struct DiffDelta<'a> {
++    raw: *mut raw::git_diff_delta,
++    _marker: marker::PhantomData<&'a raw::git_diff_delta>,
++}
++
++/// Description of one side of a delta.
++///
++/// Although this is called a "file" it could represent a file, a symbolic
++/// link, a submodule commit id, or even a tree (although that only happens if
++/// you are tracking type changes or ignored/untracked directories).
++pub struct DiffFile<'a> {
++    raw: *const raw::git_diff_file,
++    _marker: marker::PhantomData<&'a raw::git_diff_file>,
++}
++
++/// Structure describing options about how the diff should be executed.
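++///
++/// # Example
++///
++/// A rough sketch of building diff options and generating a diff from them
++/// (assumes an already-opened `Repository` named `repo`):
++///
++/// ```no_run
++/// use git2::DiffOptions;
++///
++/// # let repo = git2::Repository::open(".").unwrap();
++/// let mut opts = DiffOptions::new();
++/// opts.include_untracked(true).context_lines(5);
++/// let diff = repo.diff_tree_to_workdir(None, Some(&mut opts)).unwrap();
++/// println!("{} file(s) changed", diff.stats().unwrap().files_changed());
++/// ```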
++pub struct DiffOptions {
++    pathspec: Vec<CString>,
++    pathspec_ptrs: Vec<*const c_char>,
++    old_prefix: Option<CString>,
++    new_prefix: Option<CString>,
++    raw: raw::git_diff_options,
++}
++
++/// Control behavior of rename and copy detection
++pub struct DiffFindOptions {
++    raw: raw::git_diff_find_options,
++}
++
++/// An iterator over the deltas in a diff
++pub struct Deltas<'diff> {
++    range: Range<usize>,
++    diff: &'diff Diff<'diff>,
++}
++
++/// Structure describing a line (or data span) of a diff.
++pub struct DiffLine<'a> {
++    raw: *const raw::git_diff_line,
++    _marker: marker::PhantomData<&'a raw::git_diff_line>,
++}
++
++/// Structure describing a hunk of a diff.
++pub struct DiffHunk<'a> {
++    raw: *const raw::git_diff_hunk,
++    _marker: marker::PhantomData<&'a raw::git_diff_hunk>,
++}
++
++/// Structure describing the accumulated statistics of a diff.
++pub struct DiffStats {
++    raw: *mut raw::git_diff_stats,
++}
++
++/// Structure describing the binary contents of a diff.
++pub struct DiffBinary<'a> {
++    raw: *const raw::git_diff_binary,
++    _marker: marker::PhantomData<&'a raw::git_diff_binary>,
++}
++
++/// The contents of one of the files in a binary diff.
++pub struct DiffBinaryFile<'a> {
++    raw: *const raw::git_diff_binary_file,
++    _marker: marker::PhantomData<&'a raw::git_diff_binary_file>,
++}
++
++/// When producing a binary diff, the binary data returned will be
++/// either the deflated full ("literal") contents of the file, or
++/// the deflated binary delta between the two sides (whichever is
++/// smaller).
++#[derive(Copy, Clone, Debug)]
++pub enum DiffBinaryKind {
++    /// There is no binary delta
++    None,
++    /// The binary data is the literal contents of the file
++    Literal,
++    /// The binary data is the delta from one side to the other
++    Delta,
++}
++
++type PrintCb<'a> = FnMut(DiffDelta, Option<DiffHunk>, DiffLine) -> bool + 'a;
++
++pub type FileCb<'a> = FnMut(DiffDelta, f32) -> bool + 'a;
++pub type BinaryCb<'a> = FnMut(DiffDelta, DiffBinary) -> bool + 'a;
++pub type HunkCb<'a> = FnMut(DiffDelta, DiffHunk) -> bool + 'a;
++pub type LineCb<'a> = FnMut(DiffDelta, Option<DiffHunk>, DiffLine) -> bool + 'a;
++
++struct ForeachCallbacks<'a, 'b: 'a, 'c, 'd: 'c, 'e, 'f: 'e, 'g, 'h: 'g> {
++    file: &'a mut FileCb<'b>,
++    binary: Option<&'c mut BinaryCb<'d>>,
++    hunk: Option<&'e mut HunkCb<'f>>,
++    line: Option<&'g mut LineCb<'h>>,
++}
++
++impl<'repo> Diff<'repo> {
++    /// Merge one diff into another.
++    ///
++    /// This merges items from the "from" list into the "self" list. The
++    /// resulting diff will have all items that appear in either list.
++    /// If an item appears in both lists, then it will be "merged" to appear
++    /// as if the old version was from the "onto" list and the new version
++    /// is from the "from" list (with the exception that if the item has a
++    /// pending DELETE in the middle, then it will show as deleted).
++    pub fn merge(&mut self, from: &Diff<'repo>) -> Result<(), Error> {
++        unsafe { try_call!(raw::git_diff_merge(self.raw, &*from.raw)); }
++        Ok(())
++    }
++
++    /// Returns an iterator over the deltas in this diff.
++    pub fn deltas(&self) -> Deltas {
++        let num_deltas = unsafe { raw::git_diff_num_deltas(&*self.raw) };
++        Deltas { range: 0..(num_deltas as usize), diff: self }
++    }
++
++    /// Return the diff delta for an entry in the diff list.
++    pub fn get_delta(&self, i: usize) -> Option<DiffDelta> {
++        unsafe {
++            let ptr = raw::git_diff_get_delta(&*self.raw, i as size_t);
++            Binding::from_raw_opt(ptr as *mut _)
++        }
++    }
++
++    /// Check if deltas are sorted case sensitively or insensitively.
++    pub fn is_sorted_icase(&self) -> bool {
++        unsafe { raw::git_diff_is_sorted_icase(&*self.raw) == 1 }
++    }
++
++    /// Iterate over a diff generating formatted text output.
++    ///
++    /// Returning `false` from the callback will terminate the iteration and
++    /// return an error from this function.
++    pub fn print<F>(&self, format: DiffFormat, mut cb: F) -> Result<(), Error>
++                    where F: FnMut(DiffDelta,
++                                   Option<DiffHunk>,
++                                   DiffLine) -> bool {
++        let mut cb: &mut PrintCb = &mut cb;
++        let ptr = &mut cb as *mut _;
++        unsafe {
++            try_call!(raw::git_diff_print(self.raw, format, print_cb,
++                                          ptr as *mut _));
++            Ok(())
++        }
++    }
++
++    /// Loop over all deltas in a diff issuing callbacks.
++    ///
++    /// Returning `false` from any callback will terminate the iteration and
++    /// return an error from this function.
++    pub fn foreach(&self,
++                   file_cb: &mut FileCb,
++                   binary_cb: Option<&mut BinaryCb>,
++                   hunk_cb: Option<&mut HunkCb>,
++                   line_cb: Option<&mut LineCb>) -> Result<(), Error> {
++        let mut cbs = ForeachCallbacks {
++            file: file_cb,
++            binary: binary_cb,
++            hunk: hunk_cb,
++            line: line_cb,
++        };
++        let ptr = &mut cbs as *mut _;
++        unsafe {
++            let binary_cb_c = if cbs.binary.is_some() {
++                Some(binary_cb_c as raw::git_diff_binary_cb)
++            } else {
++                None
++            };
++            let hunk_cb_c = if cbs.hunk.is_some() {
++                Some(hunk_cb_c as raw::git_diff_hunk_cb)
++            } else {
++                None
++            };
++            let line_cb_c = if cbs.line.is_some() {
++                Some(line_cb_c as raw::git_diff_line_cb)
++            } else {
++                None
++            };
++            try_call!(raw::git_diff_foreach(self.raw, file_cb_c, binary_cb_c,
++                                            hunk_cb_c, line_cb_c,
++                                            ptr as *mut _));
++            Ok(())
++        }
++    }
++
++    /// Accumulate diff statistics for all patches.
++    pub fn stats(&self) -> Result<DiffStats, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_diff_get_stats(&mut ret, self.raw));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Transform a diff marking file renames, copies, etc.
++    ///
++    /// This modifies a diff in place, replacing old entries that look like
++    /// renames or copies with new entries reflecting those changes. This also
++    /// will, if requested, break modified files into add/remove pairs if the
++    /// amount of change is above a threshold.
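++    ///
++    /// # Example
++    ///
++    /// A short sketch of turning on rename detection before inspecting the
++    /// deltas (the repository and diff here are assumed to exist already):
++    ///
++    /// ```no_run
++    /// use git2::DiffFindOptions;
++    ///
++    /// # let repo = git2::Repository::open(".").unwrap();
++    /// # let mut diff = repo.diff_tree_to_workdir(None, None).unwrap();
++    /// let mut find = DiffFindOptions::new();
++    /// find.renames(true).copies(true);
++    /// diff.find_similar(Some(&mut find)).unwrap();
++    /// ```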
++ pub fn find_similar(&mut self, opts: Option<&mut DiffFindOptions>) ++ -> Result<(), Error> { ++ let opts = opts.map(|opts| &opts.raw); ++ unsafe { try_call!(raw::git_diff_find_similar(self.raw, opts)); } ++ Ok(()) ++ } ++ ++ // TODO: num_deltas_of_type, format_email, find_similar ++} ++ ++pub extern fn print_cb(delta: *const raw::git_diff_delta, ++ hunk: *const raw::git_diff_hunk, ++ line: *const raw::git_diff_line, ++ data: *mut c_void) -> c_int { ++ unsafe { ++ let delta = Binding::from_raw(delta as *mut _); ++ let hunk = Binding::from_raw_opt(hunk); ++ let line = Binding::from_raw(line); ++ ++ let r = panic::wrap(|| { ++ let data = data as *mut &mut PrintCb; ++ (*data)(delta, hunk, line) ++ }); ++ if r == Some(true) {0} else {-1} ++ } ++} ++ ++extern fn file_cb_c(delta: *const raw::git_diff_delta, ++ progress: f32, ++ data: *mut c_void) -> c_int { ++ unsafe { ++ let delta = Binding::from_raw(delta as *mut _); ++ ++ let r = panic::wrap(|| { ++ let cbs = data as *mut ForeachCallbacks; ++ ((*cbs).file)(delta, progress) ++ }); ++ if r == Some(true) {0} else {-1} ++ } ++} ++ ++extern fn binary_cb_c(delta: *const raw::git_diff_delta, ++ binary: *const raw::git_diff_binary, ++ data: *mut c_void) -> c_int { ++ unsafe { ++ let delta = Binding::from_raw(delta as *mut _); ++ let binary = Binding::from_raw(binary); ++ ++ let r = panic::wrap(|| { ++ let cbs = data as *mut ForeachCallbacks; ++ match (*cbs).binary { ++ Some(ref mut cb) => cb(delta, binary), ++ None => false, ++ } ++ }); ++ if r == Some(true) {0} else {-1} ++ } ++} ++ ++extern fn hunk_cb_c(delta: *const raw::git_diff_delta, ++ hunk: *const raw::git_diff_hunk, ++ data: *mut c_void) -> c_int { ++ unsafe { ++ let delta = Binding::from_raw(delta as *mut _); ++ let hunk = Binding::from_raw(hunk); ++ ++ let r = panic::wrap(|| { ++ let cbs = data as *mut ForeachCallbacks; ++ match (*cbs).hunk { ++ Some(ref mut cb) => cb(delta, hunk), ++ None => false, ++ } ++ }); ++ if r == Some(true) {0} else {-1} ++ } ++} ++ ++extern fn line_cb_c(delta: *const raw::git_diff_delta, ++ hunk: *const raw::git_diff_hunk, ++ line: *const raw::git_diff_line, ++ data: *mut c_void) -> c_int { ++ unsafe { ++ let delta = Binding::from_raw(delta as *mut _); ++ let hunk = Binding::from_raw_opt(hunk); ++ let line = Binding::from_raw(line); ++ ++ let r = panic::wrap(|| { ++ let cbs = data as *mut ForeachCallbacks; ++ match (*cbs).line { ++ Some(ref mut cb) => cb(delta, hunk, line), ++ None => false, ++ } ++ }); ++ if r == Some(true) {0} else {-1} ++ } ++} ++ ++ ++impl<'repo> Binding for Diff<'repo> { ++ type Raw = *mut raw::git_diff; ++ unsafe fn from_raw(raw: *mut raw::git_diff) -> Diff<'repo> { ++ Diff { ++ raw: raw, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *mut raw::git_diff { self.raw } ++} ++ ++impl<'repo> Drop for Diff<'repo> { ++ fn drop(&mut self) { ++ unsafe { raw::git_diff_free(self.raw) } ++ } ++} ++ ++impl<'a> DiffDelta<'a> { ++ // TODO: expose when diffs are more exposed ++ // pub fn similarity(&self) -> u16 { ++ // unsafe { (*self.raw).similarity } ++ // } ++ ++ /// Returns the number of files in this delta. 
++ pub fn nfiles(&self) -> u16 { ++ unsafe { (*self.raw).nfiles } ++ } ++ ++ /// Returns the status of this entry ++ /// ++ /// For more information, see `Delta`'s documentation ++ pub fn status(&self) -> Delta { ++ match unsafe { (*self.raw).status } { ++ raw::GIT_DELTA_UNMODIFIED => Delta::Unmodified, ++ raw::GIT_DELTA_ADDED => Delta::Added, ++ raw::GIT_DELTA_DELETED => Delta::Deleted, ++ raw::GIT_DELTA_MODIFIED => Delta::Modified, ++ raw::GIT_DELTA_RENAMED => Delta::Renamed, ++ raw::GIT_DELTA_COPIED => Delta::Copied, ++ raw::GIT_DELTA_IGNORED => Delta::Ignored, ++ raw::GIT_DELTA_UNTRACKED => Delta::Untracked, ++ raw::GIT_DELTA_TYPECHANGE => Delta::Typechange, ++ raw::GIT_DELTA_UNREADABLE => Delta::Unreadable, ++ raw::GIT_DELTA_CONFLICTED => Delta::Conflicted, ++ n => panic!("unknown diff status: {}", n), ++ } ++ } ++ ++ /// Return the file which represents the "from" side of the diff. ++ /// ++ /// What side this means depends on the function that was used to generate ++ /// the diff and will be documented on the function itself. ++ pub fn old_file(&self) -> DiffFile<'a> { ++ unsafe { Binding::from_raw(&(*self.raw).old_file as *const _) } ++ } ++ ++ /// Return the file which represents the "to" side of the diff. ++ /// ++ /// What side this means depends on the function that was used to generate ++ /// the diff and will be documented on the function itself. ++ pub fn new_file(&self) -> DiffFile<'a> { ++ unsafe { Binding::from_raw(&(*self.raw).new_file as *const _) } ++ } ++} ++ ++impl<'a> Binding for DiffDelta<'a> { ++ type Raw = *mut raw::git_diff_delta; ++ unsafe fn from_raw(raw: *mut raw::git_diff_delta) -> DiffDelta<'a> { ++ DiffDelta { ++ raw: raw, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *mut raw::git_diff_delta { self.raw } ++} ++ ++impl<'a> DiffFile<'a> { ++ /// Returns the Oid of this item. ++ /// ++ /// If this entry represents an absent side of a diff (e.g. the `old_file` ++ /// of a `Added` delta), then the oid returned will be zeroes. ++ pub fn id(&self) -> Oid { ++ unsafe { Binding::from_raw(&(*self.raw).id as *const _) } ++ } ++ ++ /// Returns the path, in bytes, of the entry relative to the working ++ /// directory of the repository. ++ pub fn path_bytes(&self) -> Option<&'a [u8]> { ++ static FOO: () = (); ++ unsafe { ::opt_bytes(&FOO, (*self.raw).path) } ++ } ++ ++ /// Returns the path of the entry relative to the working directory of the ++ /// repository. ++ pub fn path(&self) -> Option<&'a Path> { ++ self.path_bytes().map(util::bytes2path) ++ } ++ ++ /// Returns the size of this entry, in bytes ++ pub fn size(&self) -> u64 { unsafe { (*self.raw).size as u64 } } ++ ++ // TODO: expose flags/mode ++} ++ ++impl<'a> Binding for DiffFile<'a> { ++ type Raw = *const raw::git_diff_file; ++ unsafe fn from_raw(raw: *const raw::git_diff_file) -> DiffFile<'a> { ++ DiffFile { ++ raw: raw, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *const raw::git_diff_file { self.raw } ++} ++ ++impl Default for DiffOptions { ++ fn default() -> Self { ++ Self::new() ++ } ++} ++ ++impl DiffOptions { ++ /// Creates a new set of empty diff options. ++ /// ++ /// All flags and other options are defaulted to false or their otherwise ++ /// zero equivalents. 
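++    ///
++    /// # Example
++    ///
++    /// A minimal sketch of the builder pattern these options use (the flags
++    /// below are chosen arbitrarily for illustration):
++    ///
++    /// ```
++    /// use git2::DiffOptions;
++    ///
++    /// let mut opts = DiffOptions::new();
++    /// opts.reverse(true)
++    ///     .ignore_whitespace(true)
++    ///     .context_lines(1);
++    /// ```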
++    pub fn new() -> DiffOptions {
++        let mut opts = DiffOptions {
++            pathspec: Vec::new(),
++            pathspec_ptrs: Vec::new(),
++            raw: unsafe { mem::zeroed() },
++            old_prefix: None,
++            new_prefix: None,
++        };
++        assert_eq!(unsafe {
++            raw::git_diff_init_options(&mut opts.raw, 1)
++        }, 0);
++        opts
++    }
++
++    fn flag(&mut self, opt: i32, val: bool) -> &mut DiffOptions {
++        let opt = opt as u32;
++        if val {
++            self.raw.flags |= opt;
++        } else {
++            self.raw.flags &= !opt;
++        }
++        self
++    }
++
++    /// Flag indicating whether the sides of the diff will be reversed.
++    pub fn reverse(&mut self, reverse: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_REVERSE, reverse)
++    }
++
++    /// Flag indicating whether ignored files are included.
++    pub fn include_ignored(&mut self, include: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_INCLUDE_IGNORED, include)
++    }
++
++    /// Flag indicating whether ignored directories are traversed deeply or not.
++    pub fn recurse_ignored_dirs(&mut self, recurse: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_RECURSE_IGNORED_DIRS, recurse)
++    }
++
++    /// Flag indicating whether untracked files are in the diff
++    pub fn include_untracked(&mut self, include: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_INCLUDE_UNTRACKED, include)
++    }
++
++    /// Flag indicating whether untracked directories are deeply traversed or
++    /// not.
++    pub fn recurse_untracked_dirs(&mut self, recurse: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_RECURSE_UNTRACKED_DIRS, recurse)
++    }
++
++    /// Flag indicating whether unmodified files are in the diff.
++    pub fn include_unmodified(&mut self, include: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_INCLUDE_UNMODIFIED, include)
++    }
++
++    /// If enabled, then `Typechange` delta records are generated.
++    pub fn include_typechange(&mut self, include: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_INCLUDE_TYPECHANGE, include)
++    }
++
++    /// Even with `include_typechange`, the tree returned generally shows a
++    /// deleted blob. This flag correctly labels the tree transitions as a
++    /// typechange record with the `new_file`'s mode set to tree.
++    ///
++    /// Note that the tree SHA will not be available.
++    pub fn include_typechange_trees(&mut self, include: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_INCLUDE_TYPECHANGE_TREES, include)
++    }
++
++    /// Flag indicating whether file mode changes are ignored.
++    pub fn ignore_filemode(&mut self, ignore: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_IGNORE_FILEMODE, ignore)
++    }
++
++    /// Flag indicating whether all submodules should be treated as unmodified.
++    pub fn ignore_submodules(&mut self, ignore: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_IGNORE_SUBMODULES, ignore)
++    }
++
++    /// Flag indicating whether case insensitive filenames should be used.
++    pub fn ignore_case(&mut self, ignore: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_IGNORE_CASE, ignore)
++    }
++
++    /// If pathspecs are specified, this flag means that they should be applied
++    /// as an exact match instead of a fnmatch pattern.
++    pub fn disable_pathspec_match(&mut self, disable: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_DISABLE_PATHSPEC_MATCH, disable)
++    }
++
++    /// Disable updating the `binary` flag in delta records. This is useful when
++    /// iterating over a diff if you don't need hunk and data callbacks and want
++    /// to avoid having to load a file completely.
++    pub fn skip_binary_check(&mut self, skip: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_SKIP_BINARY_CHECK, skip)
++    }
++
++    /// When diff finds an untracked directory, to match the behavior of core
++    /// Git, it scans the contents for ignored and untracked files. If all
++    /// contents are ignored, then the directory is ignored; if any contents are
++    /// not ignored, then the directory is untracked. This is extra work that
++    /// may not matter in many cases.
++    ///
++    /// This flag turns off that scan and immediately labels an untracked
++    /// directory as untracked (changing the behavior to not match core git).
++    pub fn enable_fast_untracked_dirs(&mut self, enable: bool)
++                                      -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_ENABLE_FAST_UNTRACKED_DIRS, enable)
++    }
++
++    /// When diff finds a file in the working directory with stat information
++    /// different from the index, but the OID ends up being the same, write the
++    /// correct stat information into the index. Note: without this flag, diff
++    /// will always leave the index untouched.
++    pub fn update_index(&mut self, update: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_UPDATE_INDEX, update)
++    }
++
++    /// Include unreadable files in the diff
++    pub fn include_unreadable(&mut self, include: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_INCLUDE_UNREADABLE, include)
++    }
++
++    /// Include unreadable files in the diff, treating them as untracked.
++    pub fn include_unreadable_as_untracked(&mut self, include: bool)
++                                           -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_INCLUDE_UNREADABLE_AS_UNTRACKED, include)
++    }
++
++    /// Treat all files as text, disabling binary attributes and detection.
++    pub fn force_text(&mut self, force: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_FORCE_TEXT, force)
++    }
++
++    /// Treat all files as binary, disabling text diffs
++    pub fn force_binary(&mut self, force: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_FORCE_BINARY, force)
++    }
++
++    /// Ignore all whitespace
++    pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE, ignore)
++    }
++
++    /// Ignore changes in the amount of whitespace
++    pub fn ignore_whitespace_change(&mut self, ignore: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE_CHANGE, ignore)
++    }
++
++    /// Ignore whitespace at end of line
++    pub fn ignore_whitespace_eol(&mut self, ignore: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE_EOL, ignore)
++    }
++
++    /// When generating patch text, include the content of untracked files.
++    ///
++    /// This automatically turns on `include_untracked` but it does not turn on
++    /// `recurse_untracked_dirs`. Add that flag if you want the content of every
++    /// single untracked file.
++    pub fn show_untracked_content(&mut self, show: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_SHOW_UNTRACKED_CONTENT, show)
++    }
++
++    /// When generating output, include the names of unmodified files if they
++    /// are included in the `Diff`. Normally these are skipped in the formats
++    /// that list files (e.g. name-only, name-status, raw). Even with this flag,
++    /// these will not be included in the patch format.
++    pub fn show_unmodified(&mut self, show: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_SHOW_UNMODIFIED, show)
++    }
++
++    /// Use the "patience diff" algorithm
++    pub fn patience(&mut self, patience: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_PATIENCE, patience)
++    }
++
++    /// Take extra time to find the minimal diff
++    pub fn minimal(&mut self, minimal: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_MINIMAL, minimal)
++    }
++
++    /// Include the necessary deflate/delta information so that `git-apply` can
++    /// apply given diff information to binary files.
++    pub fn show_binary(&mut self, show: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_SHOW_BINARY, show)
++    }
++
++    /// Use a heuristic that takes indentation and whitespace into account
++    /// which generally can produce better diffs when dealing with ambiguous
++    /// diff hunks.
++    pub fn indent_heuristic(&mut self, heuristic: bool) -> &mut DiffOptions {
++        self.flag(raw::GIT_DIFF_INDENT_HEURISTIC, heuristic)
++    }
++
++    /// Set the number of unchanged lines that define the boundary of a hunk
++    /// (and to display before and after).
++    ///
++    /// The default value for this is 3.
++    pub fn context_lines(&mut self, lines: u32) -> &mut DiffOptions {
++        self.raw.context_lines = lines;
++        self
++    }
++
++    /// Set the maximum number of unchanged lines between hunk boundaries before
++    /// the hunks will be merged into one.
++    ///
++    /// The default value for this is 0.
++    pub fn interhunk_lines(&mut self, lines: u32) -> &mut DiffOptions {
++        self.raw.interhunk_lines = lines;
++        self
++    }
++
++    /// The default value for this is `core.abbrev` or 7 if unset.
++    pub fn id_abbrev(&mut self, abbrev: u16) -> &mut DiffOptions {
++        self.raw.id_abbrev = abbrev;
++        self
++    }
++
++    /// Maximum size (in bytes) above which a blob will be marked as binary
++    /// automatically.
++    ///
++    /// A negative value will disable this entirely.
++    ///
++    /// The default value for this is 512MB.
++    pub fn max_size(&mut self, size: i64) -> &mut DiffOptions {
++        self.raw.max_size = size as raw::git_off_t;
++        self
++    }
++
++    /// The virtual "directory" to prefix old file names with in hunk headers.
++    ///
++    /// The default value for this is "a".
++    pub fn old_prefix<T: IntoCString>(&mut self, t: T) -> &mut DiffOptions {
++        self.old_prefix = Some(t.into_c_string().unwrap());
++        self
++    }
++
++    /// The virtual "directory" to prefix new file names with in hunk headers.
++    ///
++    /// The default value for this is "b".
++    pub fn new_prefix<T: IntoCString>(&mut self, t: T) -> &mut DiffOptions {
++        self.new_prefix = Some(t.into_c_string().unwrap());
++        self
++    }
++
++    /// Add to the array of paths/fnmatch patterns to constrain the diff.
++    pub fn pathspec<T: IntoCString>(&mut self, pathspec: T)
++                                    -> &mut DiffOptions {
++        let s = pathspec.into_c_string().unwrap();
++        self.pathspec_ptrs.push(s.as_ptr());
++        self.pathspec.push(s);
++        self
++    }
++
++    /// Acquire a pointer to the underlying raw options.
++    ///
++    /// This function is unsafe as the pointer is only valid so long as this
++    /// structure is not moved, modified, or used elsewhere.
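++    ///
++    /// # Example
++    ///
++    /// A sketch of the intended usage pattern: take the pointer, hand it to a
++    /// raw libgit2 call, and keep the options value alive and untouched for
++    /// the duration:
++    ///
++    /// ```no_run
++    /// let mut opts = git2::DiffOptions::new();
++    /// opts.context_lines(1);
++    /// let ptr = unsafe { opts.raw() };
++    /// // `ptr` must be used before `opts` is moved or modified again.
++    /// ```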
++    pub unsafe fn raw(&mut self) -> *const raw::git_diff_options {
++        self.raw.old_prefix = self.old_prefix.as_ref().map(|s| s.as_ptr())
++                                  .unwrap_or(ptr::null());
++        self.raw.new_prefix = self.new_prefix.as_ref().map(|s| s.as_ptr())
++                                  .unwrap_or(ptr::null());
++        self.raw.pathspec.count = self.pathspec_ptrs.len() as size_t;
++        self.raw.pathspec.strings = self.pathspec_ptrs.as_ptr() as *mut _;
++        &self.raw as *const _
++    }
++
++    // TODO: expose ignore_submodules, notify_cb/notify_payload
++}
++
++impl<'diff> Iterator for Deltas<'diff> {
++    type Item = DiffDelta<'diff>;
++    fn next(&mut self) -> Option<DiffDelta<'diff>> {
++        self.range.next().and_then(|i| self.diff.get_delta(i))
++    }
++    fn size_hint(&self) -> (usize, Option<usize>) { self.range.size_hint() }
++}
++impl<'diff> DoubleEndedIterator for Deltas<'diff> {
++    fn next_back(&mut self) -> Option<DiffDelta<'diff>> {
++        self.range.next_back().and_then(|i| self.diff.get_delta(i))
++    }
++}
++impl<'diff> ExactSizeIterator for Deltas<'diff> {}
++
++impl<'a> DiffLine<'a> {
++    /// Line number in old file or `None` for added line
++    pub fn old_lineno(&self) -> Option<u32> {
++        match unsafe { (*self.raw).old_lineno } {
++            n if n < 0 => None,
++            n => Some(n as u32),
++        }
++    }
++
++    /// Line number in new file or `None` for deleted line
++    pub fn new_lineno(&self) -> Option<u32> {
++        match unsafe { (*self.raw).new_lineno } {
++            n if n < 0 => None,
++            n => Some(n as u32),
++        }
++    }
++
++    /// Number of newline characters in content
++    pub fn num_lines(&self) -> u32 {
++        unsafe { (*self.raw).num_lines as u32 }
++    }
++
++    /// Offset in the original file to the content
++    pub fn content_offset(&self) -> i64 {
++        unsafe { (*self.raw).content_offset as i64 }
++    }
++
++    /// Content of this line as bytes.
++    pub fn content(&self) -> &[u8] {
++        unsafe {
++            slice::from_raw_parts((*self.raw).content as *const u8,
++                                  (*self.raw).content_len as usize)
++        }
++    }
++
++    /// Sigil showing the origin of this `DiffLine`.
++    ///
++    /// * ` ` - Line context
++    /// * `+` - Line addition
++    /// * `-` - Line deletion
++    /// * `=` - Context (End of file)
++    /// * `>` - Add (End of file)
++    /// * `<` - Remove (End of file)
++    /// * `F` - File header
++    /// * `H` - Hunk header
++    /// * `B` - Line binary
++    pub fn origin(&self) -> char {
++        match unsafe { (*self.raw).origin as raw::git_diff_line_t } {
++            raw::GIT_DIFF_LINE_CONTEXT => ' ',
++            raw::GIT_DIFF_LINE_ADDITION => '+',
++            raw::GIT_DIFF_LINE_DELETION => '-',
++            raw::GIT_DIFF_LINE_CONTEXT_EOFNL => '=',
++            raw::GIT_DIFF_LINE_ADD_EOFNL => '>',
++            raw::GIT_DIFF_LINE_DEL_EOFNL => '<',
++            raw::GIT_DIFF_LINE_FILE_HDR => 'F',
++            raw::GIT_DIFF_LINE_HUNK_HDR => 'H',
++            raw::GIT_DIFF_LINE_BINARY => 'B',
++            _ => ' ',
++        }
++    }
++}
++
++impl<'a> Binding for DiffLine<'a> {
++    type Raw = *const raw::git_diff_line;
++    unsafe fn from_raw(raw: *const raw::git_diff_line) -> DiffLine<'a> {
++        DiffLine {
++            raw: raw,
++            _marker: marker::PhantomData,
++        }
++    }
++    fn raw(&self) -> *const raw::git_diff_line { self.raw }
++}
++
++impl<'a> DiffHunk<'a> {
++    /// Starting line number in old_file
++    pub fn old_start(&self) -> u32 {
++        unsafe { (*self.raw).old_start as u32 }
++    }
++
++    /// Number of lines in old_file
++    pub fn old_lines(&self) -> u32 {
++        unsafe { (*self.raw).old_lines as u32 }
++    }
++
++    /// Starting line number in new_file
++    pub fn new_start(&self) -> u32 {
++        unsafe { (*self.raw).new_start as u32 }
++    }
++
++    /// Number of lines in new_file
++    pub fn new_lines(&self) -> u32 {
++        unsafe { (*self.raw).new_lines as u32 }
++    }
++
++    /// Header text
++    pub fn header(&self) -> &[u8] {
++        unsafe {
++            slice::from_raw_parts((*self.raw).header.as_ptr() as *const u8,
++                                  (*self.raw).header_len as usize)
++        }
++    }
++}
++
++impl<'a> Binding for DiffHunk<'a> {
++    type Raw = *const raw::git_diff_hunk;
++    unsafe fn from_raw(raw: *const raw::git_diff_hunk) -> DiffHunk<'a> {
++        DiffHunk {
++            raw: raw,
++            _marker: marker::PhantomData,
++        }
++    }
++    fn raw(&self) -> *const raw::git_diff_hunk { self.raw }
++}
++
++impl DiffStats {
++    /// Get the total number of files changed in a diff.
++    pub fn files_changed(&self) -> usize {
++        unsafe { raw::git_diff_stats_files_changed(&*self.raw) as usize }
++    }
++
++    /// Get the total number of insertions in a diff
++    pub fn insertions(&self) -> usize {
++        unsafe { raw::git_diff_stats_insertions(&*self.raw) as usize }
++    }
++
++    /// Get the total number of deletions in a diff
++    pub fn deletions(&self) -> usize {
++        unsafe { raw::git_diff_stats_deletions(&*self.raw) as usize }
++    }
++
++    /// Print diff statistics to a Buf
++    pub fn to_buf(&self, format: DiffStatsFormat, width: usize)
++                  -> Result<Buf, Error> {
++        let buf = Buf::new();
++        unsafe {
++            try_call!(raw::git_diff_stats_to_buf(buf.raw(), self.raw,
++                                                 format.bits(),
++                                                 width as size_t));
++        }
++        Ok(buf)
++    }
++}
++
++impl Binding for DiffStats {
++    type Raw = *mut raw::git_diff_stats;
++
++    unsafe fn from_raw(raw: *mut raw::git_diff_stats) -> DiffStats {
++        DiffStats { raw: raw }
++    }
++    fn raw(&self) -> *mut raw::git_diff_stats { self.raw }
++}
++
++impl Drop for DiffStats {
++    fn drop(&mut self) {
++        unsafe { raw::git_diff_stats_free(self.raw) }
++    }
++}
++
++impl<'a> DiffBinary<'a> {
++    /// Returns whether there is data in this binary structure or not.
++    ///
++    /// If this is `true`, then this was produced and included binary content.
++ /// If this is `false` then this was generated knowing only that a binary ++ /// file changed but without providing the data, probably from a patch that ++ /// said `Binary files a/file.txt and b/file.txt differ`. ++ pub fn contains_data(&self) -> bool { ++ unsafe { (*self.raw).contains_data == 1 } ++ } ++ ++ /// The contents of the old file. ++ pub fn old_file(&self) -> DiffBinaryFile<'a> { ++ unsafe { Binding::from_raw(&(*self.raw).old_file as *const _) } ++ } ++ ++ /// The contents of the new file. ++ pub fn new_file(&self) -> DiffBinaryFile<'a> { ++ unsafe { Binding::from_raw(&(*self.raw).new_file as *const _) } ++ } ++} ++ ++impl<'a> Binding for DiffBinary<'a> { ++ type Raw = *const raw::git_diff_binary; ++ unsafe fn from_raw(raw: *const raw::git_diff_binary) -> DiffBinary<'a> { ++ DiffBinary { ++ raw: raw, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *const raw::git_diff_binary { self.raw } ++} ++ ++impl<'a> DiffBinaryFile<'a> { ++ /// The type of binary data for this file ++ pub fn kind(&self) -> DiffBinaryKind { ++ unsafe { Binding::from_raw((*self.raw).kind) } ++ } ++ ++ /// The binary data, deflated ++ pub fn data(&self) -> &[u8] { ++ unsafe { ++ slice::from_raw_parts((*self.raw).data as *const u8, ++ (*self.raw).datalen as usize) ++ } ++ } ++ ++ /// The length of the binary data after inflation ++ pub fn inflated_len(&self) -> usize { ++ unsafe { (*self.raw).inflatedlen as usize } ++ } ++ ++} ++ ++impl<'a> Binding for DiffBinaryFile<'a> { ++ type Raw = *const raw::git_diff_binary_file; ++ unsafe fn from_raw(raw: *const raw::git_diff_binary_file) -> DiffBinaryFile<'a> { ++ DiffBinaryFile { ++ raw: raw, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *const raw::git_diff_binary_file { self.raw } ++} ++ ++impl Binding for DiffBinaryKind { ++ type Raw = raw::git_diff_binary_t; ++ unsafe fn from_raw(raw: raw::git_diff_binary_t) -> DiffBinaryKind { ++ match raw { ++ raw::GIT_DIFF_BINARY_NONE => DiffBinaryKind::None, ++ raw::GIT_DIFF_BINARY_LITERAL => DiffBinaryKind::Literal, ++ raw::GIT_DIFF_BINARY_DELTA => DiffBinaryKind::Delta, ++ _ => panic!("Unknown git diff binary kind"), ++ } ++ } ++ fn raw(&self) -> raw::git_diff_binary_t { ++ match *self { ++ DiffBinaryKind::None => raw::GIT_DIFF_BINARY_NONE, ++ DiffBinaryKind::Literal => raw::GIT_DIFF_BINARY_LITERAL, ++ DiffBinaryKind::Delta => raw::GIT_DIFF_BINARY_DELTA, ++ } ++ } ++} ++ ++impl Default for DiffFindOptions { ++ fn default() -> Self { ++ Self::new() ++ } ++} ++ ++impl DiffFindOptions { ++ /// Creates a new set of empty diff find options. ++ /// ++ /// All flags and other options are defaulted to false or their otherwise ++ /// zero equivalents. ++ pub fn new() -> DiffFindOptions { ++ let mut opts = DiffFindOptions { ++ raw: unsafe { mem::zeroed() }, ++ }; ++ assert_eq!(unsafe { ++ raw::git_diff_find_init_options(&mut opts.raw, 1) ++ }, 0); ++ opts ++ } ++ ++ fn flag(&mut self, opt: u32, val: bool) -> &mut DiffFindOptions { ++ if val { ++ self.raw.flags |= opt; ++ } else { ++ self.raw.flags &= !opt; ++ } ++ self ++ } ++ ++ /// Reset all flags back to their unset state, indicating that ++ /// `diff.renames` should be used instead. This is overridden once any flag ++ /// is set. ++ pub fn by_config(&mut self) -> &mut DiffFindOptions { ++ self.flag(0xffffffff, false) ++ } ++ ++ /// Look for renames? ++ pub fn renames(&mut self, find: bool) -> &mut DiffFindOptions { ++ self.flag(raw::GIT_DIFF_FIND_RENAMES, find) ++ } ++ ++ /// Consider old side of modified for renames? 
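++    ///
++    /// # Example
++    ///
++    /// A sketch of enabling this flag together with plain rename detection
++    /// before calling `Diff::find_similar`:
++    ///
++    /// ```
++    /// use git2::DiffFindOptions;
++    ///
++    /// let mut opts = DiffFindOptions::new();
++    /// opts.renames(true).renames_from_rewrites(true);
++    /// ```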
++    pub fn renames_from_rewrites(&mut self, find: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_RENAMES_FROM_REWRITES, find)
++    }
++
++    /// Look for copies?
++    pub fn copies(&mut self, find: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_COPIES, find)
++    }
++
++    /// Consider unmodified as copy sources?
++    ///
++    /// For this to work correctly, use `include_unmodified` when the initial
++    /// diff is being generated.
++    pub fn copies_from_unmodified(&mut self, find: bool)
++                                  -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_COPIES_FROM_UNMODIFIED, find)
++    }
++
++    /// Mark significant rewrites for split.
++    pub fn rewrites(&mut self, find: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_REWRITES, find)
++    }
++
++    /// Actually split large rewrites into delete/add pairs
++    pub fn break_rewrites(&mut self, find: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_BREAK_REWRITES, find)
++    }
++
++    #[doc(hidden)]
++    pub fn break_rewries(&mut self, find: bool) -> &mut DiffFindOptions {
++        self.break_rewrites(find)
++    }
++
++    /// Find renames/copies for untracked items in working directory.
++    ///
++    /// For this to work correctly use the `include_untracked` option when the
++    /// initial diff is being generated.
++    pub fn for_untracked(&mut self, find: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_FOR_UNTRACKED, find)
++    }
++
++    /// Turn on all finding features.
++    pub fn all(&mut self, find: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_ALL, find)
++    }
++
++    /// Measure similarity ignoring leading whitespace (default)
++    pub fn ignore_leading_whitespace(&mut self, ignore: bool)
++                                     -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_IGNORE_LEADING_WHITESPACE, ignore)
++    }
++
++    /// Measure similarity ignoring all whitespace
++    pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_IGNORE_WHITESPACE, ignore)
++    }
++
++    /// Measure similarity including all data
++    pub fn dont_ignore_whitespace(&mut self, dont: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_DONT_IGNORE_WHITESPACE, dont)
++    }
++
++    /// Measure similarity only by comparing SHAs (fast and cheap)
++    pub fn exact_match_only(&mut self, exact: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_EXACT_MATCH_ONLY, exact)
++    }
++
++    /// Do not break rewrites unless they contribute to a rename.
++    ///
++    /// Normally, `break_rewrites` and `rewrites` will measure the
++    /// self-similarity of modified files and split the ones that have changed a
++    /// lot into a delete/add pair. Then the sides of that pair will be
++    /// considered candidates for rename and copy detection
++    ///
++    /// If you add this flag in and the split pair is not used for an actual
++    /// rename or copy, then the modified record will be restored to a regular
++    /// modified record instead of being split.
++    pub fn break_rewrites_for_renames_only(&mut self, b: bool)
++                                           -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_BREAK_REWRITES_FOR_RENAMES_ONLY, b)
++    }
++
++    /// Remove any unmodified deltas after find_similar is done.
++    ///
++    /// Using `copies_from_unmodified` to emulate the `--find-copies-harder`
++    /// behavior requires building a diff with the `include_unmodified` flag. If
++    /// you do not want unmodified records in the final result, pass this flag
++    /// to have them removed.
++    pub fn remove_unmodified(&mut self, remove: bool) -> &mut DiffFindOptions {
++        self.flag(raw::GIT_DIFF_FIND_REMOVE_UNMODIFIED, remove)
++    }
++
++    /// Similarity to consider a file renamed (default 50)
++    pub fn rename_threshold(&mut self, thresh: u16) -> &mut DiffFindOptions {
++        self.raw.rename_threshold = thresh;
++        self
++    }
++
++    /// Similarity of modified to be eligible rename source (default 50)
++    pub fn rename_from_rewrite_threshold(&mut self, thresh: u16)
++                                         -> &mut DiffFindOptions {
++        self.raw.rename_from_rewrite_threshold = thresh;
++        self
++    }
++
++    /// Similarity to consider a file copy (default 50)
++    pub fn copy_threshold(&mut self, thresh: u16) -> &mut DiffFindOptions {
++        self.raw.copy_threshold = thresh;
++        self
++    }
++
++    /// Similarity to split modify into delete/add pair (default 60)
++    pub fn break_rewrite_threshold(&mut self, thresh: u16)
++                                   -> &mut DiffFindOptions {
++        self.raw.break_rewrite_threshold = thresh;
++        self
++    }
++
++    /// Maximum similarity sources to examine for a file (somewhat like
++    /// git-diff's `-l` option or `diff.renameLimit` config)
++    ///
++    /// Defaults to 200
++    pub fn rename_limit(&mut self, limit: usize) -> &mut DiffFindOptions {
++        self.raw.rename_limit = limit as size_t;
++        self
++    }
++
++    // TODO: expose git_diff_similarity_metric
++}
++
++#[cfg(test)]
++mod tests {
++    use DiffOptions;
++    use std::fs::File;
++    use std::path::Path;
++    use std::borrow::Borrow;
++    use std::io::Write;
++
++    #[test]
++    fn smoke() {
++        let (_td, repo) = ::test::repo_init();
++        let diff = repo.diff_tree_to_workdir(None, None).unwrap();
++        assert_eq!(diff.deltas().len(), 0);
++        let stats = diff.stats().unwrap();
++        assert_eq!(stats.insertions(), 0);
++        assert_eq!(stats.deletions(), 0);
++        assert_eq!(stats.files_changed(), 0);
++    }
++
++    #[test]
++    fn foreach_smoke() {
++        let (_td, repo) = ::test::repo_init();
++        let diff = t!(repo.diff_tree_to_workdir(None, None));
++        let mut count = 0;
++        t!(diff.foreach(&mut |_file, _progress| { count = count + 1; true },
++                        None, None, None));
++        assert_eq!(count, 0);
++    }
++
++    #[test]
++    fn foreach_file_only() {
++        let path = Path::new("foo");
++        let (td, repo) = ::test::repo_init();
++        t!(t!(File::create(&td.path().join(path))).write_all(b"bar"));
++        let mut opts = DiffOptions::new();
++        opts.include_untracked(true);
++        let diff = t!(repo.diff_tree_to_workdir(None, Some(&mut opts)));
++        let mut count = 0;
++        let mut result = None;
++        t!(diff.foreach(&mut |file, _progress| {
++            count = count + 1;
++            result = file.new_file().path().map(ToOwned::to_owned);
++            true
++        }, None, None, None));
++        assert_eq!(result.as_ref().map(Borrow::borrow), Some(path));
++        assert_eq!(count, 1);
++    }
++
++    #[test]
++    fn foreach_file_and_hunk() {
++        let path = Path::new("foo");
++        let (td, repo) = ::test::repo_init();
++        t!(t!(File::create(&td.path().join(path))).write_all(b"bar"));
++        let mut index = t!(repo.index());
++        t!(index.add_path(path));
++        let mut opts = DiffOptions::new();
++        opts.include_untracked(true);
++        let diff = t!(repo.diff_tree_to_index(None, Some(&index),
++                                              Some(&mut opts)));
++        let mut new_lines = 0;
++        t!(diff.foreach(
++            &mut |_file, _progress| { true },
++            None,
++            Some(&mut |_file, hunk| {
++                new_lines = hunk.new_lines();
++                true
++            }),
++            None));
++        assert_eq!(new_lines, 1);
++    }
++
++    #[test]
++    fn foreach_all_callbacks() {
++        let fib = vec![0, 1, 1, 2, 3, 5, 8];
++        // Verified with a node implementation of deflate, might be worth
++        // adding a deflate lib to do this inline here.
++        let deflated_fib = vec![120, 156, 99, 96, 100, 100, 98, 102, 229, 0, 0,
++                                0, 53, 0, 21];
++        let foo_path = Path::new("foo");
++        let bin_path = Path::new("bin");
++        let (td, repo) = ::test::repo_init();
++        t!(t!(File::create(&td.path().join(foo_path))).write_all(b"bar\n"));
++        t!(t!(File::create(&td.path().join(bin_path))).write_all(&fib));
++        let mut index = t!(repo.index());
++        t!(index.add_path(foo_path));
++        t!(index.add_path(bin_path));
++        let mut opts = DiffOptions::new();
++        opts.include_untracked(true).show_binary(true);
++        let diff = t!(repo.diff_tree_to_index(None, Some(&index),
++                                              Some(&mut opts)));
++        let mut bin_content = None;
++        let mut new_lines = 0;
++        let mut line_content = None;
++        t!(diff.foreach(
++            &mut |_file, _progress| { true },
++            Some(&mut |_file, binary| {
++                bin_content = Some(binary.new_file().data().to_owned());
++                true
++            }),
++            Some(&mut |_file, hunk| {
++                new_lines = hunk.new_lines();
++                true
++            }),
++            Some(&mut |_file, _hunk, line| {
++                line_content = String::from_utf8(line.content().into()).ok();
++                true
++            })));
++        assert_eq!(bin_content, Some(deflated_fib));
++        assert_eq!(new_lines, 1);
++        assert_eq!(line_content, Some("bar\n".to_string()));
++    }
++}
diff --cc vendor/git2-0.7.5/src/error.rs
index 000000000,000000000..1e92b23a0
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/error.rs
@@@ -1,0 -1,0 +1,284 @@@
++use std::env::JoinPathsError;
++use std::ffi::{CStr, NulError};
++use std::error;
++use std::fmt;
++use std::str;
++use libc::c_int;
++
++use {raw, ErrorClass, ErrorCode};
++
++/// A structure to represent errors coming out of libgit2.
++#[derive(Debug,PartialEq)]
++pub struct Error {
++    code: c_int,
++    klass: c_int,
++    message: String,
++}
++
++impl Error {
++    /// Returns the last error that happened with the code specified by `code`.
++    ///
++    /// The `code` argument typically comes from the return value of a function
++    /// call. This code will later be returned from the `code` function.
++    ///
++    /// Historically this function returned `Some` or `None` based on the return
++    /// value of `giterr_last` but nowadays it always returns `Some` so it's
++    /// safe to unwrap the return value. This API will change in the next major
++    /// version.
++    pub fn last_error(code: c_int) -> Option<Error> {
++        ::init();
++        unsafe {
++            // Note that whenever libgit2 returns an error any negative value
++            // indicates that an error happened. Auxiliary information is
++            // *usually* in `giterr_last` but unfortunately that's not always
++            // the case. Sometimes a negative error code is returned from
++            // libgit2 *without* calling `giterr_set` internally to configure
++            // the error.
++            //
++            // To handle this case and hopefully provide better error messages
++            // on our end we unconditionally call `giterr_clear` when we're done
++            // with an error. This is an attempt to clear it as aggressively as
++            // possible when we can to ensure that error information from one
++            // api invocation doesn't leak over to the next api invocation.
++            //
++            // Additionally if `giterr_last` returns null then we return a
++            // canned error.
++ let ptr = raw::giterr_last(); ++ let err = if ptr.is_null() { ++ let mut error = Error::from_str("an unknown git error occurred"); ++ error.code = code; ++ error ++ } else { ++ Error::from_raw(code, ptr) ++ }; ++ raw::giterr_clear(); ++ Some(err) ++ } ++ } ++ ++ unsafe fn from_raw(code: c_int, ptr: *const raw::git_error) -> Error { ++ let msg = CStr::from_ptr((*ptr).message as *const _).to_bytes(); ++ let msg = String::from_utf8_lossy(msg).into_owned(); ++ Error { code: code, klass: (*ptr).klass, message: msg } ++ } ++ ++ /// Creates a new error from the given string as the error. ++ /// ++ /// The error returned will have the code `GIT_ERROR` and the class ++ /// `GITERR_NONE`. ++ pub fn from_str(s: &str) -> Error { ++ Error { ++ code: raw::GIT_ERROR as c_int, ++ klass: raw::GITERR_NONE as c_int, ++ message: s.to_string(), ++ } ++ } ++ ++ /// Return the error code associated with this error. ++ /// ++ /// An error code is intended to be programmatically actionable most of the ++ /// time. For example the code `GIT_EAGAIN` indicates that an error could be ++ /// fixed by trying again, while the code `GIT_ERROR` is more bland and ++ /// doesn't convey anything in particular. ++ pub fn code(&self) -> ErrorCode { ++ match self.raw_code() { ++ raw::GIT_OK => super::ErrorCode::GenericError, ++ raw::GIT_ERROR => super::ErrorCode::GenericError, ++ raw::GIT_ENOTFOUND => super::ErrorCode::NotFound, ++ raw::GIT_EEXISTS => super::ErrorCode::Exists, ++ raw::GIT_EAMBIGUOUS => super::ErrorCode::Ambiguous, ++ raw::GIT_EBUFS => super::ErrorCode::BufSize, ++ raw::GIT_EUSER => super::ErrorCode::User, ++ raw::GIT_EBAREREPO => super::ErrorCode::BareRepo, ++ raw::GIT_EUNBORNBRANCH => super::ErrorCode::UnbornBranch, ++ raw::GIT_EUNMERGED => super::ErrorCode::Unmerged, ++ raw::GIT_ENONFASTFORWARD => super::ErrorCode::NotFastForward, ++ raw::GIT_EINVALIDSPEC => super::ErrorCode::InvalidSpec, ++ raw::GIT_ECONFLICT => super::ErrorCode::Conflict, ++ raw::GIT_ELOCKED => super::ErrorCode::Locked, ++ raw::GIT_EMODIFIED => super::ErrorCode::Modified, ++ raw::GIT_PASSTHROUGH => super::ErrorCode::GenericError, ++ raw::GIT_ITEROVER => super::ErrorCode::GenericError, ++ raw::GIT_EAUTH => super::ErrorCode::Auth, ++ raw::GIT_ECERTIFICATE => super::ErrorCode::Certificate, ++ raw::GIT_EAPPLIED => super::ErrorCode::Applied, ++ raw::GIT_EPEEL => super::ErrorCode::Peel, ++ raw::GIT_EEOF => super::ErrorCode::Eof, ++ raw::GIT_EINVALID => super::ErrorCode::Invalid, ++ raw::GIT_EUNCOMMITTED => super::ErrorCode::Uncommitted, ++ raw::GIT_EDIRECTORY => super::ErrorCode::Directory, ++ _ => super::ErrorCode::GenericError, ++ } ++ } ++ ++ /// Return the error class associated with this error. ++ /// ++ /// Error classes are in general mostly just informative. For example the ++ /// class will show up in the error message but otherwise an error class is ++ /// typically not directly actionable. 
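++    ///
++    /// For example, the class can be logged alongside the message when a
++    /// lookup fails (a sketch; the submodule name is hypothetical):
++    ///
++    /// ```no_run
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/a/repo").unwrap();
++    /// if let Err(e) = repo.find_submodule("does_not_exist") {
++    ///     println!("{:?}: {}", e.class(), e.message());
++    /// }
++    /// ```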
++ pub fn class(&self) -> ErrorClass { ++ match self.raw_class() { ++ raw::GITERR_NONE => super::ErrorClass::None, ++ raw::GITERR_NOMEMORY => super::ErrorClass::NoMemory, ++ raw::GITERR_OS => super::ErrorClass::Os, ++ raw::GITERR_INVALID => super::ErrorClass::Invalid, ++ raw::GITERR_REFERENCE => super::ErrorClass::Reference, ++ raw::GITERR_ZLIB => super::ErrorClass::Zlib, ++ raw::GITERR_REPOSITORY => super::ErrorClass::Repository, ++ raw::GITERR_CONFIG => super::ErrorClass::Config, ++ raw::GITERR_REGEX => super::ErrorClass::Regex, ++ raw::GITERR_ODB => super::ErrorClass::Odb, ++ raw::GITERR_INDEX => super::ErrorClass::Index, ++ raw::GITERR_OBJECT => super::ErrorClass::Object, ++ raw::GITERR_NET => super::ErrorClass::Net, ++ raw::GITERR_TAG => super::ErrorClass::Tag, ++ raw::GITERR_TREE => super::ErrorClass::Tree, ++ raw::GITERR_INDEXER => super::ErrorClass::Indexer, ++ raw::GITERR_SSL => super::ErrorClass::Ssl, ++ raw::GITERR_SUBMODULE => super::ErrorClass::Submodule, ++ raw::GITERR_THREAD => super::ErrorClass::Thread, ++ raw::GITERR_STASH => super::ErrorClass::Stash, ++ raw::GITERR_CHECKOUT => super::ErrorClass::Checkout, ++ raw::GITERR_FETCHHEAD => super::ErrorClass::FetchHead, ++ raw::GITERR_MERGE => super::ErrorClass::Merge, ++ raw::GITERR_SSH => super::ErrorClass::Ssh, ++ raw::GITERR_FILTER => super::ErrorClass::Filter, ++ raw::GITERR_REVERT => super::ErrorClass::Revert, ++ raw::GITERR_CALLBACK => super::ErrorClass::Callback, ++ raw::GITERR_CHERRYPICK => super::ErrorClass::CherryPick, ++ raw::GITERR_DESCRIBE => super::ErrorClass::Describe, ++ raw::GITERR_REBASE => super::ErrorClass::Rebase, ++ raw::GITERR_FILESYSTEM => super::ErrorClass::Filesystem, ++ _ => super::ErrorClass::None, ++ } ++ } ++ ++ /// Return the raw error code associated with this error. ++ pub fn raw_code(&self) -> raw::git_error_code { ++ macro_rules! check( ($($e:ident,)*) => ( ++ $(if self.code == raw::$e as c_int { raw::$e }) else * ++ else { ++ raw::GIT_ERROR ++ } ++ ) ); ++ check!( ++ GIT_OK, ++ GIT_ERROR, ++ GIT_ENOTFOUND, ++ GIT_EEXISTS, ++ GIT_EAMBIGUOUS, ++ GIT_EBUFS, ++ GIT_EUSER, ++ GIT_EBAREREPO, ++ GIT_EUNBORNBRANCH, ++ GIT_EUNMERGED, ++ GIT_ENONFASTFORWARD, ++ GIT_EINVALIDSPEC, ++ GIT_ECONFLICT, ++ GIT_ELOCKED, ++ GIT_EMODIFIED, ++ GIT_EAUTH, ++ GIT_ECERTIFICATE, ++ GIT_EAPPLIED, ++ GIT_EPEEL, ++ GIT_EEOF, ++ GIT_EINVALID, ++ GIT_EUNCOMMITTED, ++ GIT_PASSTHROUGH, ++ GIT_ITEROVER, ++ ) ++ } ++ ++ /// Return the raw error class associated with this error. ++ pub fn raw_class(&self) -> raw::git_error_t { ++ macro_rules! 
check( ($($e:ident,)*) => (
++            $(if self.klass == raw::$e as c_int { raw::$e }) else *
++            else {
++                raw::GITERR_NONE
++            }
++        ) );
++        check!(
++            GITERR_NONE,
++            GITERR_NOMEMORY,
++            GITERR_OS,
++            GITERR_INVALID,
++            GITERR_REFERENCE,
++            GITERR_ZLIB,
++            GITERR_REPOSITORY,
++            GITERR_CONFIG,
++            GITERR_REGEX,
++            GITERR_ODB,
++            GITERR_INDEX,
++            GITERR_OBJECT,
++            GITERR_NET,
++            GITERR_TAG,
++            GITERR_TREE,
++            GITERR_INDEXER,
++            GITERR_SSL,
++            GITERR_SUBMODULE,
++            GITERR_THREAD,
++            GITERR_STASH,
++            GITERR_CHECKOUT,
++            GITERR_FETCHHEAD,
++            GITERR_MERGE,
++            GITERR_SSH,
++            GITERR_FILTER,
++            GITERR_REVERT,
++            GITERR_CALLBACK,
++            GITERR_CHERRYPICK,
++            GITERR_DESCRIBE,
++            GITERR_REBASE,
++            GITERR_FILESYSTEM,
++        )
++    }
++
++    /// Return the message associated with this error
++    pub fn message(&self) -> &str { &self.message }
++}
++
++impl error::Error for Error {
++    fn description(&self) -> &str { &self.message }
++}
++
++impl fmt::Display for Error {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        write!(f, "{}", self.message)?;
++        match self.class() {
++            ErrorClass::None => {}
++            other => write!(f, "; class={:?} ({})", other, self.klass)?,
++        }
++        match self.code() {
++            ErrorCode::GenericError => {}
++            other => write!(f, "; code={:?} ({})", other, self.code)?,
++        }
++        Ok(())
++    }
++}
++
++impl From<NulError> for Error {
++    fn from(_: NulError) -> Error {
++        Error::from_str("data contained a nul byte that could not be \
++                         represented as a string")
++    }
++}
++
++impl From<JoinPathsError> for Error {
++    fn from(e: JoinPathsError) -> Error {
++        Error::from_str(error::Error::description(&e))
++    }
++}
++
++
++#[cfg(test)]
++mod tests {
++    use {ErrorClass, ErrorCode};
++
++    #[test]
++    fn smoke() {
++        let (_td, repo) = ::test::repo_init();
++
++        let err = repo.find_submodule("does_not_exist").err().unwrap();
++        assert_eq!(err.code(), ErrorCode::NotFound);
++        assert_eq!(err.class(), ErrorClass::Submodule);
++    }
++}
diff --cc vendor/git2-0.7.5/src/index.rs
index 000000000,000000000..0f603a5b0
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/index.rs
@@@ -1,0 -1,0 +1,634 @@@
++use std::ffi::{CStr, OsString, CString};
++use std::ops::Range;
++use std::path::Path;
++use std::ptr;
++use std::slice;
++
++use libc::{c_int, c_uint, size_t, c_void, c_char};
++
++use {raw, panic, Repository, Error, Tree, Oid, IndexAddOption, IndexTime};
++use IntoCString;
++use util::{self, Binding};
++
++/// A structure to represent a git [index][1]
++///
++/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects
++pub struct Index {
++    raw: *mut raw::git_index,
++}
++
++/// An iterator over the entries in an index
++pub struct IndexEntries<'index> {
++    range: Range<usize>,
++    index: &'index Index,
++}
++
++/// A callback function to filter index matches.
++///
++/// Used by `Index::{add_all,remove_all,update_all}`. The first argument is the
++/// path, and the second is the pathspec that matched it. Return 0 to confirm
++/// the operation on the item, > 0 to skip the item, and < 0 to abort the scan.
++pub type IndexMatchedPath<'a> = FnMut(&Path, &[u8]) -> i32 + 'a;
++
++/// A structure to represent an entry or a file inside of an index.
++///
++/// All fields of an entry are public for modification and inspection. This is
++/// also how a new index entry is created.
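++///
++/// A bare-bones entry can be built by filling in every field by hand, much
++/// like the test helper later in this module (the all-zero id and the path
++/// below are placeholders):
++///
++/// ```no_run
++/// use git2::{IndexEntry, IndexTime, Oid};
++///
++/// let entry = IndexEntry {
++///     ctime: IndexTime::new(0, 0),
++///     mtime: IndexTime::new(0, 0),
++///     dev: 0,
++///     ino: 0,
++///     mode: 0o100644,
++///     uid: 0,
++///     gid: 0,
++///     file_size: 0,
++///     id: Oid::from_bytes(&[0; 20]).unwrap(),
++///     flags: 0,
++///     flags_extended: 0,
++///     path: b"file".to_vec(),
++/// };
++/// ```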
++#[allow(missing_docs)]
++pub struct IndexEntry {
++    pub ctime: IndexTime,
++    pub mtime: IndexTime,
++    pub dev: u32,
++    pub ino: u32,
++    pub mode: u32,
++    pub uid: u32,
++    pub gid: u32,
++    pub file_size: u32,
++    pub id: Oid,
++    pub flags: u16,
++    pub flags_extended: u16,
++    pub path: Vec<u8>,
++}
++
++impl Index {
++    /// Creates a new in-memory index.
++    ///
++    /// This index object cannot be read/written to the filesystem, but may be
++    /// used to perform in-memory index operations.
++    pub fn new() -> Result<Index, Error> {
++        ::init();
++        let mut raw = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_index_new(&mut raw));
++            Ok(Binding::from_raw(raw))
++        }
++    }
++
++    /// Create a new bare Git index object as a memory representation of the Git
++    /// index file in 'index_path', without a repository to back it.
++    ///
++    /// Since there is no ODB or working directory behind this index, any Index
++    /// methods which rely on these (e.g. add_path) will fail.
++    ///
++    /// If you need an index attached to a repository, use the `index()` method
++    /// on `Repository`.
++    pub fn open(index_path: &Path) -> Result<Index, Error> {
++        ::init();
++        let mut raw = ptr::null_mut();
++        let index_path = try!(index_path.into_c_string());
++        unsafe {
++            try_call!(raw::git_index_open(&mut raw, index_path));
++            Ok(Binding::from_raw(raw))
++        }
++    }
++
++    /// Add or update an index entry from an in-memory struct
++    ///
++    /// If a previous index entry exists that has the same path and stage as the
++    /// given 'source_entry', it will be replaced. Otherwise, the 'source_entry'
++    /// will be added.
++    pub fn add(&mut self, entry: &IndexEntry) -> Result<(), Error> {
++        let path = try!(CString::new(&entry.path[..]));
++
++        // libgit2 encodes the length of the path in the lower bits of the
++        // `flags` entry, so mask those out and recalculate here to ensure we
++        // don't corrupt anything.
++        let mut flags = entry.flags & !raw::GIT_IDXENTRY_NAMEMASK;
++
++        if entry.path.len() < raw::GIT_IDXENTRY_NAMEMASK as usize {
++            flags |= entry.path.len() as u16;
++        } else {
++            flags |= raw::GIT_IDXENTRY_NAMEMASK;
++        }
++
++        unsafe {
++            let raw = raw::git_index_entry {
++                dev: entry.dev,
++                ino: entry.ino,
++                mode: entry.mode,
++                uid: entry.uid,
++                gid: entry.gid,
++                file_size: entry.file_size,
++                id: *entry.id.raw(),
++                flags: flags,
++                flags_extended: entry.flags_extended,
++                path: path.as_ptr(),
++                mtime: raw::git_index_time {
++                    seconds: entry.mtime.seconds(),
++                    nanoseconds: entry.mtime.nanoseconds(),
++                },
++                ctime: raw::git_index_time {
++                    seconds: entry.ctime.seconds(),
++                    nanoseconds: entry.ctime.nanoseconds(),
++                },
++            };
++            try_call!(raw::git_index_add(self.raw, &raw));
++            Ok(())
++        }
++    }
++
++    /// Add or update an index entry from a file on disk
++    ///
++    /// The file path must be relative to the repository's working folder and
++    /// must be readable.
++    ///
++    /// This method will fail in bare index instances.
++    ///
++    /// This forces the file to be added to the index, not looking at gitignore
++    /// rules.
++    ///
++    /// If this file currently is the result of a merge conflict, this file will
++    /// no longer be marked as conflicting. The data about the conflict will be
++    /// moved to the "resolve undo" (REUC) section.
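++    ///
++    /// A short sketch of staging a file (the file name is hypothetical):
++    ///
++    /// ```no_run
++    /// use std::path::Path;
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/a/repo").unwrap();
++    /// let mut index = repo.index().unwrap();
++    /// index.add_path(Path::new("some-file")).unwrap();
++    /// index.write().unwrap();
++    /// ```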
++    pub fn add_path(&mut self, path: &Path) -> Result<(), Error> {
++        // Git apparently expects '/' to be separators for paths
++        let mut posix_path = OsString::new();
++        for (i, comp) in path.components().enumerate() {
++            if i != 0 { posix_path.push("/"); }
++            posix_path.push(comp.as_os_str());
++        }
++        let posix_path = try!(posix_path.into_c_string());
++        unsafe {
++            try_call!(raw::git_index_add_bypath(self.raw, posix_path));
++            Ok(())
++        }
++    }
++
++    /// Add or update index entries matching files in the working directory.
++    ///
++    /// This method will fail in bare index instances.
++    ///
++    /// The `pathspecs` are a list of file names or shell glob patterns that
++    /// will be matched against files in the repository's working directory.
++    /// Each file that matches will be added to the index (either updating an
++    /// existing entry or adding a new entry). You can disable glob expansion
++    /// and force exact matching with the `AddDisablePathspecMatch` flag.
++    ///
++    /// Files that are ignored will be skipped (unlike `add_path`). If a file is
++    /// already tracked in the index, then it will be updated even if it is
++    /// ignored. Pass the `AddForce` flag to skip the checking of ignore rules.
++    ///
++    /// To emulate `git add -A` and generate an error if the pathspec contains
++    /// the exact path of an ignored file (when not using `AddForce`), add the
++    /// `AddCheckPathspec` flag. This checks that each entry in `pathspecs`
++    /// that is an exact match to a filename on disk is either not ignored or
++    /// already in the index. If this check fails, the function will return
++    /// an error.
++    ///
++    /// To emulate `git add -A` with the "dry-run" option, just use a callback
++    /// function that always returns a positive value. See below for details.
++    ///
++    /// If any files are currently the result of a merge conflict, those files
++    /// will no longer be marked as conflicting. The data about the conflicts
++    /// will be moved to the "resolve undo" (REUC) section.
++    ///
++    /// If you provide a callback function, it will be invoked on each matching
++    /// item in the working directory immediately before it is added to /
++    /// updated in the index. Returning zero will add the item to the index,
++    /// greater than zero will skip the item, and less than zero will abort the
++    /// scan and return an error to the caller.
++    pub fn add_all<T, I>(&mut self,
++                         pathspecs: I,
++                         flag: IndexAddOption,
++                         mut cb: Option<&mut IndexMatchedPath>)
++                         -> Result<(), Error>
++        where T: IntoCString, I: IntoIterator<Item = T>,
++    {
++        let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs));
++        let ptr = cb.as_mut();
++        let callback = ptr.as_ref().map(|_| {
++            index_matched_path_cb as raw::git_index_matched_path_cb
++        });
++        unsafe {
++            try_call!(raw::git_index_add_all(self.raw,
++                                             &raw_strarray,
++                                             flag.bits() as c_uint,
++                                             callback,
++                                             ptr.map(|p| p as *mut _)
++                                                .unwrap_or(ptr::null_mut())
++                                                 as *mut c_void));
++        }
++        Ok(())
++    }
++
++    /// Clear the contents (all the entries) of an index object.
++    ///
++    /// This clears the index object in memory; changes must be explicitly
++    /// written to disk for them to take effect persistently via `write_*`.
++    pub fn clear(&mut self) -> Result<(), Error> {
++        unsafe { try_call!(raw::git_index_clear(self.raw)); }
++        Ok(())
++    }
++
++    /// Get the count of entries currently in the index
++    pub fn len(&self) -> usize {
++        unsafe { raw::git_index_entrycount(&*self.raw) as usize }
++    }
++
++    /// Return `true` if there is no entry in the index
++    pub fn is_empty(&self) -> bool {
++        self.len() == 0
++    }
++
++    /// Get one of the entries in the index by its position.
++    pub fn get(&self, n: usize) -> Option<IndexEntry> {
++        unsafe {
++            let ptr = raw::git_index_get_byindex(self.raw, n as size_t);
++            if ptr.is_null() {None} else {Some(Binding::from_raw(*ptr))}
++        }
++    }
++
++    /// Get an iterator over the entries in this index.
++    pub fn iter(&self) -> IndexEntries {
++        IndexEntries { range: 0..self.len(), index: self }
++    }
++
++    /// Get one of the entries in the index by its path.
++    pub fn get_path(&self, path: &Path, stage: i32) -> Option<IndexEntry> {
++        let path = path.into_c_string().unwrap();
++        unsafe {
++            let ptr = call!(raw::git_index_get_bypath(self.raw, path,
++                                                      stage as c_int));
++            if ptr.is_null() {None} else {Some(Binding::from_raw(*ptr))}
++        }
++    }
++
++    /// Does this index have conflicts?
++    ///
++    /// Returns `true` if the index contains conflicts, `false` if it does not.
++    pub fn has_conflicts(&self) -> bool {
++        unsafe {
++            raw::git_index_has_conflicts(self.raw) == 1
++        }
++    }
++
++    /// Get the full path to the index file on disk.
++    ///
++    /// Returns `None` if this is an in-memory index.
++    pub fn path(&self) -> Option<&Path> {
++        unsafe {
++            ::opt_bytes(self, raw::git_index_path(&*self.raw)).map(util::bytes2path)
++        }
++    }
++
++    /// Update the contents of an existing index object in memory by reading
++    /// from the hard disk.
++    ///
++    /// If force is true, this performs a "hard" read that discards in-memory
++    /// changes and always reloads the on-disk index data. If there is no
++    /// on-disk version, the index will be cleared.
++    ///
++    /// If force is false, this does a "soft" read that reloads the index data
++    /// from disk only if it has changed since the last time it was loaded.
++    /// Purely in-memory index data will be untouched. Be aware: if there are
++    /// changes on disk, unwritten in-memory changes are discarded.
++    pub fn read(&mut self, force: bool) -> Result<(), Error> {
++        unsafe { try_call!(raw::git_index_read(self.raw, force)); }
++        Ok(())
++    }
++
++    /// Read a tree into the index file with stats
++    ///
++    /// The current index contents will be replaced by the specified tree.
++    pub fn read_tree(&mut self, tree: &Tree) -> Result<(), Error> {
++        unsafe { try_call!(raw::git_index_read_tree(self.raw, &*tree.raw())); }
++        Ok(())
++    }
++
++    /// Remove an entry from the index
++    pub fn remove(&mut self, path: &Path, stage: i32) -> Result<(), Error> {
++        let path = try!(path.into_c_string());
++        unsafe {
++            try_call!(raw::git_index_remove(self.raw, path, stage as c_int));
++        }
++        Ok(())
++    }
++
++    /// Remove an index entry corresponding to a file on disk.
++    ///
++    /// The file path must be relative to the repository's working folder. It
++    /// may exist.
++    ///
++    /// If this file currently is the result of a merge conflict, this file will
++    /// no longer be marked as conflicting. The data about the conflict will be
++    /// moved to the "resolve undo" (REUC) section.
++    pub fn remove_path(&mut self, path: &Path) -> Result<(), Error> {
++        let path = try!(path.into_c_string());
++        unsafe {
++            try_call!(raw::git_index_remove_bypath(self.raw, path));
++        }
++        Ok(())
++    }
++
++    /// Remove all entries from the index under a given directory.
++    pub fn remove_dir(&mut self, path: &Path, stage: i32) -> Result<(), Error> {
++        let path = try!(path.into_c_string());
++        unsafe {
++            try_call!(raw::git_index_remove_directory(self.raw, path,
++                                                      stage as c_int));
++        }
++        Ok(())
++    }
++
++    /// Remove all matching index entries.
++    ///
++    /// If you provide a callback function, it will be invoked on each matching
++    /// item in the index immediately before it is removed. Return 0 to remove
++    /// the item, > 0 to skip the item, and < 0 to abort the scan.
++    pub fn remove_all<T, I>(&mut self,
++                            pathspecs: I,
++                            mut cb: Option<&mut IndexMatchedPath>)
++                            -> Result<(), Error>
++        where T: IntoCString, I: IntoIterator<Item = T>,
++    {
++        let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs));
++        let ptr = cb.as_mut();
++        let callback = ptr.as_ref().map(|_| {
++            index_matched_path_cb as raw::git_index_matched_path_cb
++        });
++        unsafe {
++            try_call!(raw::git_index_remove_all(self.raw,
++                                                &raw_strarray,
++                                                callback,
++                                                ptr.map(|p| p as *mut _)
++                                                   .unwrap_or(ptr::null_mut())
++                                                    as *mut c_void));
++        }
++        Ok(())
++    }
++
++    /// Update all index entries to match the working directory
++    ///
++    /// This method will fail in bare index instances.
++    ///
++    /// This scans the existing index entries and synchronizes them with the
++    /// working directory, deleting them if the corresponding working directory
++    /// file no longer exists, otherwise updating the information (including
++    /// adding the latest version of file to the ODB if needed).
++    ///
++    /// If you provide a callback function, it will be invoked on each matching
++    /// item in the index immediately before it is updated (either refreshed or
++    /// removed depending on working directory state). Return 0 to proceed with
++    /// updating the item, > 0 to skip the item, and < 0 to abort the scan.
++    pub fn update_all<T, I>(&mut self,
++                            pathspecs: I,
++                            mut cb: Option<&mut IndexMatchedPath>)
++                            -> Result<(), Error>
++        where T: IntoCString, I: IntoIterator<Item = T>,
++    {
++        let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs));
++        let ptr = cb.as_mut();
++        let callback = ptr.as_ref().map(|_| {
++            index_matched_path_cb as raw::git_index_matched_path_cb
++        });
++        unsafe {
++            try_call!(raw::git_index_update_all(self.raw,
++                                                &raw_strarray,
++                                                callback,
++                                                ptr.map(|p| p as *mut _)
++                                                   .unwrap_or(ptr::null_mut())
++                                                    as *mut c_void));
++        }
++        Ok(())
++    }
++
++    /// Write an existing index object from memory back to disk using an atomic
++    /// file lock.
++    pub fn write(&mut self) -> Result<(), Error> {
++        unsafe { try_call!(raw::git_index_write(self.raw)); }
++        Ok(())
++    }
++
++    /// Write the index as a tree.
++    ///
++    /// This method will scan the index and write a representation of its
++    /// current state back to disk; it recursively creates tree objects for each
++    /// of the subtrees stored in the index, but only returns the OID of the
++    /// root tree. This is the OID that can be used e.g. to create a commit.
++    ///
++    /// The index instance cannot be bare, and needs to be associated to an
++    /// existing repository.
++    ///
++    /// The index must not contain any file in conflict.
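++    ///
++    /// A sketch of turning the current index into a commit (the commit
++    /// message is illustrative and HEAD is assumed to exist):
++    ///
++    /// ```no_run
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/a/repo").unwrap();
++    /// let mut index = repo.index().unwrap();
++    /// let tree_id = index.write_tree().unwrap();
++    /// let tree = repo.find_tree(tree_id).unwrap();
++    /// let sig = repo.signature().unwrap();
++    /// let parent = repo.head().unwrap().peel_to_commit().unwrap();
++    /// repo.commit(Some("HEAD"), &sig, &sig, "message", &tree, &[&parent]).unwrap();
++    /// ```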
++    pub fn write_tree(&mut self) -> Result<Oid, Error> {
++        let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        unsafe {
++            try_call!(raw::git_index_write_tree(&mut raw, self.raw));
++            Ok(Binding::from_raw(&raw as *const _))
++        }
++    }
++
++    /// Write the index as a tree to the given repository
++    ///
++    /// This is the same as `write_tree` except that the destination repository
++    /// can be chosen.
++    pub fn write_tree_to(&mut self, repo: &Repository) -> Result<Oid, Error> {
++        let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        unsafe {
++            try_call!(raw::git_index_write_tree_to(&mut raw, self.raw,
++                                                   repo.raw()));
++            Ok(Binding::from_raw(&raw as *const _))
++        }
++    }
++}
++
++impl Binding for Index {
++    type Raw = *mut raw::git_index;
++    unsafe fn from_raw(raw: *mut raw::git_index) -> Index {
++        Index { raw: raw }
++    }
++    fn raw(&self) -> *mut raw::git_index { self.raw }
++}
++
++extern fn index_matched_path_cb(path: *const c_char,
++                                matched_pathspec: *const c_char,
++                                payload: *mut c_void) -> c_int {
++    unsafe {
++        let path = CStr::from_ptr(path).to_bytes();
++        let matched_pathspec = CStr::from_ptr(matched_pathspec).to_bytes();
++
++        panic::wrap(|| {
++            let payload = payload as *mut &mut IndexMatchedPath;
++            (*payload)(util::bytes2path(path), matched_pathspec) as c_int
++        }).unwrap_or(-1)
++    }
++}
++
++impl Drop for Index {
++    fn drop(&mut self) {
++        unsafe { raw::git_index_free(self.raw) }
++    }
++}
++
++impl<'index> Iterator for IndexEntries<'index> {
++    type Item = IndexEntry;
++    fn next(&mut self) -> Option<IndexEntry> {
++        self.range.next().map(|i| self.index.get(i).unwrap())
++    }
++}
++
++impl Binding for IndexEntry {
++    type Raw = raw::git_index_entry;
++
++    unsafe fn from_raw(raw: raw::git_index_entry) -> IndexEntry {
++        let raw::git_index_entry {
++            ctime, mtime, dev, ino, mode, uid, gid, file_size, id, flags,
++            flags_extended, path
++        } = raw;
++
++        // libgit2 encodes the length of the path in the lower bits of `flags`,
++        // but if the length exceeds the number of bits then the path is
++        // nul-terminated.
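++        //
++        // For illustration: with libgit2's GIT_IDXENTRY_NAMEMASK of 0xfff, a
++        // 20-byte path stores 20 in those bits, while a path of 0xfff bytes
++        // or more stores 0xfff and the true length has to be recovered from
++        // the nul-terminated string, as done below.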
++ let mut pathlen = (flags & raw::GIT_IDXENTRY_NAMEMASK) as usize; ++ if pathlen == raw::GIT_IDXENTRY_NAMEMASK as usize { ++ pathlen = CStr::from_ptr(path).to_bytes().len(); ++ } ++ ++ let path = slice::from_raw_parts(path as *const u8, pathlen); ++ ++ IndexEntry { ++ dev: dev, ++ ino: ino, ++ mode: mode, ++ uid: uid, ++ gid: gid, ++ file_size: file_size, ++ id: Binding::from_raw(&id as *const _), ++ flags: flags, ++ flags_extended: flags_extended, ++ path: path.to_vec(), ++ mtime: Binding::from_raw(mtime), ++ ctime: Binding::from_raw(ctime), ++ } ++ } ++ ++ fn raw(&self) -> raw::git_index_entry { ++ // not implemented, may require a CString in storage ++ panic!() ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::fs::{self, File}; ++ use std::path::Path; ++ use tempdir::TempDir; ++ ++ use {Index, IndexEntry, Repository, ResetType, Oid, IndexTime}; ++ ++ #[test] ++ fn smoke() { ++ let mut index = Index::new().unwrap(); ++ assert!(index.add_path(&Path::new(".")).is_err()); ++ index.clear().unwrap(); ++ assert_eq!(index.len(), 0); ++ assert!(index.get(0).is_none()); ++ assert!(index.path().is_none()); ++ assert!(index.read(true).is_err()); ++ } ++ ++ #[test] ++ fn smoke_from_repo() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut index = repo.index().unwrap(); ++ assert_eq!(index.path().map(|s| s.to_path_buf()), ++ Some(repo.path().join("index"))); ++ Index::open(&repo.path().join("index")).unwrap(); ++ ++ index.clear().unwrap(); ++ index.read(true).unwrap(); ++ index.write().unwrap(); ++ index.write_tree().unwrap(); ++ index.write_tree_to(&repo).unwrap(); ++ } ++ ++ #[test] ++ fn add_all() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut index = repo.index().unwrap(); ++ ++ let root = repo.path().parent().unwrap(); ++ fs::create_dir(&root.join("foo")).unwrap(); ++ File::create(&root.join("foo/bar")).unwrap(); ++ let mut called = false; ++ index.add_all(["foo"].iter(), ::IndexAddOption::DEFAULT, ++ Some(&mut |a: &Path, b: &[u8]| { ++ assert!(!called); ++ called = true; ++ assert_eq!(b, b"foo"); ++ assert_eq!(a, Path::new("foo/bar")); ++ 0 ++ })).unwrap(); ++ assert!(called); ++ ++ called = false; ++ index.remove_all(["."].iter(), Some(&mut |a: &Path, b: &[u8]| { ++ assert!(!called); ++ called = true; ++ assert_eq!(b, b"."); ++ assert_eq!(a, Path::new("foo/bar")); ++ 0 ++ })).unwrap(); ++ assert!(called); ++ } ++ ++ #[test] ++ fn smoke_add() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut index = repo.index().unwrap(); ++ ++ let root = repo.path().parent().unwrap(); ++ fs::create_dir(&root.join("foo")).unwrap(); ++ File::create(&root.join("foo/bar")).unwrap(); ++ index.add_path(Path::new("foo/bar")).unwrap(); ++ index.write().unwrap(); ++ assert_eq!(index.iter().count(), 1); ++ ++ // Make sure we can use this repo somewhere else now. 
++        let id = index.write_tree().unwrap();
++        let tree = repo.find_tree(id).unwrap();
++        let sig = repo.signature().unwrap();
++        let id = repo.refname_to_id("HEAD").unwrap();
++        let parent = repo.find_commit(id).unwrap();
++        let commit = repo.commit(Some("HEAD"), &sig, &sig, "commit",
++                                 &tree, &[&parent]).unwrap();
++        let obj = repo.find_object(commit, None).unwrap();
++        repo.reset(&obj, ResetType::Hard, None).unwrap();
++
++        let td2 = TempDir::new("git").unwrap();
++        let url = ::test::path2url(&root);
++        let repo = Repository::clone(&url, td2.path()).unwrap();
++        let obj = repo.find_object(commit, None).unwrap();
++        repo.reset(&obj, ResetType::Hard, None).unwrap();
++    }
++
++    #[test]
++    fn add_then_read() {
++        let mut index = Index::new().unwrap();
++        assert!(index.add(&entry()).is_err());
++
++        let mut index = Index::new().unwrap();
++        let mut e = entry();
++        e.path = b"foobar".to_vec();
++        index.add(&e).unwrap();
++        let e = index.get(0).unwrap();
++        assert_eq!(e.path.len(), 6);
++    }
++
++    fn entry() -> IndexEntry {
++        IndexEntry {
++            ctime: IndexTime::new(0, 0),
++            mtime: IndexTime::new(0, 0),
++            dev: 0,
++            ino: 0,
++            mode: 0o100644,
++            uid: 0,
++            gid: 0,
++            file_size: 0,
++            id: Oid::from_bytes(&[0; 20]).unwrap(),
++            flags: 0,
++            flags_extended: 0,
++            path: Vec::new(),
++        }
++    }
++}
diff --cc vendor/git2-0.7.5/src/lib.rs
index 000000000,000000000..8a6c64b94
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/lib.rs
@@@ -1,0 -1,0 +1,1337 @@@
++//! # libgit2 bindings for Rust
++//!
++//! This library contains bindings to the [libgit2][1] C library which is used
++//! to manage git repositories. The library itself is a work in progress and is
++//! likely lacking some bindings here and there, so be warned.
++//!
++//! [1]: https://libgit2.github.com/
++//!
++//! The git2-rs library strives to be as close to libgit2 as possible, but also
++//! strives to make using libgit2 as safe as possible. All resource management
++//! is automatic as well as adding strong types to all interfaces (including
++//! `Result`)
++//!
++//! ## Creating a `Repository`
++//!
++//! The `Repository` is the source from which almost all other objects in
++//! git2-rs are spawned. A repository can be created through opening,
++//! initializing, or cloning.
++//!
++//! ### Initializing a new repository
++//!
++//! The `init` method will create a new repository, assuming one does not
++//! already exist.
++//!
++//! ```no_run
++//! # #![allow(unstable)]
++//! use git2::Repository;
++//!
++//! let repo = match Repository::init("/path/to/a/repo") {
++//!     Ok(repo) => repo,
++//!     Err(e) => panic!("failed to init: {}", e),
++//! };
++//! ```
++//!
++//! ### Opening an existing repository
++//!
++//! ```no_run
++//! # #![allow(unstable)]
++//! use git2::Repository;
++//!
++//! let repo = match Repository::open("/path/to/a/repo") {
++//!     Ok(repo) => repo,
++//!     Err(e) => panic!("failed to open: {}", e),
++//! };
++//! ```
++//!
++//! ### Cloning an existing repository
++//!
++//! ```no_run
++//! # #![allow(unstable)]
++//! use git2::Repository;
++//!
++//! let url = "https://github.com/alexcrichton/git2-rs";
++//! let repo = match Repository::clone(url, "/path/to/a/repo") {
++//!     Ok(repo) => repo,
++//!     Err(e) => panic!("failed to clone: {}", e),
++//! };
++//! ```
++//!
++//! ## Working with a `Repository`
++//!
++//! All derivative objects, references, etc. are attached to the lifetime of
++//! the source `Repository`, to ensure that they do not outlive the repository
++//! itself.
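++//!
++//! For example, walking all references of a repository only needs the
++//! `Repository` itself (a sketch; the path is hypothetical):
++//!
++//! ```no_run
++//! use git2::Repository;
++//!
++//! let repo = Repository::open("/path/to/a/repo").unwrap();
++//! for reference in repo.references().unwrap() {
++//!     let reference = reference.unwrap();
++//!     println!("{:?}", reference.name());
++//! }
++//! ```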
++
++#![doc(html_root_url = "https://docs.rs/git2/0.6")]
++#![allow(trivial_numeric_casts, trivial_casts)]
++#![deny(missing_docs)]
++#![cfg_attr(test, deny(warnings))]
++
++extern crate libc;
++extern crate url;
++extern crate libgit2_sys as raw;
++#[macro_use] extern crate bitflags;
++#[macro_use] extern crate log;
++#[cfg(test)] extern crate tempdir;
++
++use std::ffi::{CStr, CString};
++use std::fmt;
++use std::str;
++use std::sync::{Once, ONCE_INIT};
++
++pub use blame::{Blame, BlameHunk, BlameIter, BlameOptions};
++pub use blob::{Blob, BlobWriter};
++pub use branch::{Branch, Branches};
++pub use buf::Buf;
++pub use commit::{Commit, Parents};
++pub use config::{Config, ConfigEntry, ConfigEntries};
++pub use cred::{Cred, CredentialHelper};
++pub use describe::{Describe, DescribeFormatOptions, DescribeOptions};
++pub use diff::{Diff, DiffDelta, DiffFile, DiffOptions, Deltas};
++pub use diff::{DiffBinary, DiffBinaryFile, DiffBinaryKind};
++pub use diff::{DiffLine, DiffHunk, DiffStats, DiffFindOptions};
++pub use error::Error;
++pub use index::{Index, IndexEntry, IndexEntries, IndexMatchedPath};
++pub use merge::{AnnotatedCommit, MergeOptions};
++pub use message::{message_prettify, DEFAULT_COMMENT_CHAR};
++pub use note::{Note, Notes};
++pub use object::Object;
++pub use oid::Oid;
++pub use packbuilder::{PackBuilder, PackBuilderStage};
++pub use pathspec::{Pathspec, PathspecMatchList, PathspecFailedEntries};
++pub use pathspec::{PathspecDiffEntries, PathspecEntries};
++pub use patch::Patch;
++pub use proxy_options::ProxyOptions;
++pub use reference::{Reference, References, ReferenceNames};
++pub use reflog::{Reflog, ReflogEntry, ReflogIter};
++pub use refspec::Refspec;
++pub use remote::{Remote, RemoteConnection, Refspecs, RemoteHead, FetchOptions, PushOptions};
++pub use remote_callbacks::{RemoteCallbacks, Credentials, TransferProgress};
++pub use remote_callbacks::{TransportMessage, Progress, UpdateTips};
++pub use repo::{Repository, RepositoryInitOptions};
++pub use revspec::Revspec;
++pub use revwalk::Revwalk;
++pub use signature::Signature;
++pub use status::{StatusOptions, Statuses, StatusIter, StatusEntry, StatusShow};
++pub use stash::{StashApplyOptions, StashCb, StashApplyProgressCb};
++pub use submodule::{Submodule, SubmoduleUpdateOptions};
++pub use tag::Tag;
++pub use time::{Time, IndexTime};
++pub use tree::{Tree, TreeEntry, TreeIter};
++pub use treebuilder::TreeBuilder;
++pub use odb::{Odb, OdbObject, OdbReader, OdbWriter};
++pub use util::IntoCString;
++
++// Create a convenience method on bitflag struct which checks the given flag
++macro_rules! is_bit_set {
++    ($name:ident, $flag:expr) => (
++        #[allow(missing_docs)]
++        pub fn $name(&self) -> bool {
++            self.intersects($flag)
++        }
++    )
++}
++
++/// An enumeration of possible errors that can happen when working with a git
++/// repository.
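++///
++/// The code can be matched on to react to a specific failure, e.g. (a
++/// sketch; the branch name is hypothetical):
++///
++/// ```no_run
++/// use git2::{BranchType, ErrorCode, Repository};
++///
++/// let repo = Repository::open("/path/to/a/repo").unwrap();
++/// match repo.find_branch("does-not-exist", BranchType::Local) {
++///     Ok(branch) => println!("found {:?}", branch.name()),
++///     Err(ref e) if e.code() == ErrorCode::NotFound => println!("no such branch"),
++///     Err(e) => panic!("lookup failed: {}", e),
++/// }
++/// ```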
++#[derive(PartialEq, Eq, Clone, Debug, Copy)]
++pub enum ErrorCode {
++    /// Generic error
++    GenericError,
++    /// Requested object could not be found
++    NotFound,
++    /// Object exists preventing operation
++    Exists,
++    /// More than one object matches
++    Ambiguous,
++    /// Output buffer too short to hold data
++    BufSize,
++    /// User-generated error
++    User,
++    /// Operation not allowed on bare repository
++    BareRepo,
++    /// HEAD refers to branch with no commits
++    UnbornBranch,
++    /// Merge in progress prevented operation
++    Unmerged,
++    /// Reference was not fast-forwardable
++    NotFastForward,
++    /// Name/ref spec was not in a valid format
++    InvalidSpec,
++    /// Checkout conflicts prevented operation
++    Conflict,
++    /// Lock file prevented operation
++    Locked,
++    /// Reference value does not match expected
++    Modified,
++    /// Authentication error
++    Auth,
++    /// Server certificate is invalid
++    Certificate,
++    /// Patch/merge has already been applied
++    Applied,
++    /// The requested peel operation is not possible
++    Peel,
++    /// Unexpected EOF
++    Eof,
++    /// Invalid operation or input
++    Invalid,
++    /// Uncommitted changes in index prevented operation
++    Uncommitted,
++    /// Operation was not valid for a directory
++    Directory,
++}
++
++/// An enumeration of possible categories of things that can have
++/// errors when working with a git repository.
++#[derive(PartialEq, Eq, Clone, Debug, Copy)]
++pub enum ErrorClass {
++    /// Uncategorized
++    None,
++    /// Out of memory or insufficient allocated space
++    NoMemory,
++    /// Syscall or standard system library error
++    Os,
++    /// Invalid input
++    Invalid,
++    /// Error resolving or manipulating a reference
++    Reference,
++    /// ZLib failure
++    Zlib,
++    /// Bad repository state
++    Repository,
++    /// Bad configuration
++    Config,
++    /// Regex failure
++    Regex,
++    /// Bad object
++    Odb,
++    /// Invalid index data
++    Index,
++    /// Error creating or obtaining an object
++    Object,
++    /// Network error
++    Net,
++    /// Error manipulating a tag
++    Tag,
++    /// Invalid value in tree
++    Tree,
++    /// Hashing or packing error
++    Indexer,
++    /// Error from SSL
++    Ssl,
++    /// Error involving submodules
++    Submodule,
++    /// Threading error
++    Thread,
++    /// Error manipulating a stash
++    Stash,
++    /// Checkout failure
++    Checkout,
++    /// Invalid FETCH_HEAD
++    FetchHead,
++    /// Merge failure
++    Merge,
++    /// SSH failure
++    Ssh,
++    /// Error manipulating filters
++    Filter,
++    /// Error reverting commit
++    Revert,
++    /// Error from a user callback
++    Callback,
++    /// Error cherry-picking commit
++    CherryPick,
++    /// Can't describe object
++    Describe,
++    /// Error during rebase
++    Rebase,
++    /// Filesystem-related error
++    Filesystem,
++}
++
++/// A listing of the possible states that a repository can be in.
++#[derive(PartialEq, Eq, Clone, Debug, Copy)]
++#[allow(missing_docs)]
++pub enum RepositoryState {
++    Clean,
++    Merge,
++    Revert,
++    RevertSequence,
++    CherryPick,
++    CherryPickSequence,
++    Bisect,
++    Rebase,
++    RebaseInteractive,
++    RebaseMerge,
++    ApplyMailbox,
++    ApplyMailboxOrRebase,
++}
++
++/// An enumeration of the possible directions for a remote.
++#[derive(Copy, Clone)]
++pub enum Direction {
++    /// Data will be fetched (read) from this remote.
++    Fetch,
++    /// Data will be pushed (written) to this remote.
++    Push,
++}
++
++/// An enumeration of the operations that can be performed for the `reset`
++/// method on a `Repository`.
++#[derive(Copy, Clone)]
++pub enum ResetType {
++    /// Move the head to the given commit.
++    Soft,
++    /// Soft plus reset the index to the commit.
++    Mixed,
++    /// Mixed plus changes in the working tree are discarded.
++    Hard,
++}
++
++/// An enumeration of all possible kinds of objects.
++#[derive(PartialEq, Eq, Copy, Clone, Debug)]
++pub enum ObjectType {
++    /// Any kind of git object
++    Any,
++    /// An object which corresponds to a git commit
++    Commit,
++    /// An object which corresponds to a git tree
++    Tree,
++    /// An object which corresponds to a git blob
++    Blob,
++    /// An object which corresponds to a git tag
++    Tag,
++}
++
++/// An enumeration of all possible kinds of references.
++#[derive(PartialEq, Eq, Copy, Clone, Debug)]
++pub enum ReferenceType {
++    /// A reference which points at an object id.
++    Oid,
++
++    /// A reference which points at another reference.
++    Symbolic,
++}
++
++/// An enumeration for the possible types of branches
++#[derive(PartialEq, Eq, Debug, Copy, Clone)]
++pub enum BranchType {
++    /// A local branch not on a remote.
++    Local,
++    /// A branch for a remote.
++    Remote,
++}
++
++/// An enumeration of the possible priority levels of a config file.
++///
++/// The levels correspond to the escalation logic (higher to lower) when
++/// searching for config entries.
++#[derive(PartialEq, Eq, Debug, Copy, Clone)]
++pub enum ConfigLevel {
++    /// System-wide on Windows, for compatibility with portable git
++    ProgramData,
++    /// System-wide configuration file, e.g. /etc/gitconfig
++    System,
++    /// XDG-compatible configuration file, e.g. ~/.config/git/config
++    XDG,
++    /// User-specific configuration, e.g. ~/.gitconfig
++    Global,
++    /// Repository specific config, e.g. $PWD/.git/config
++    Local,
++    /// Application specific configuration file
++    App,
++    /// Highest level available
++    Highest,
++}
++
++/// Merge file favor options for `MergeOptions` instruct the file-level
++/// merging functionality how to deal with conflicting regions of the files.
++#[derive(PartialEq, Eq, Debug, Copy, Clone)]
++pub enum FileFavor {
++    /// When a region of a file is changed in both branches, a conflict will be
++    /// recorded in the index so that git_checkout can produce a merge file with
++    /// conflict markers in the working directory. This is the default.
++    Normal,
++    /// When a region of a file is changed in both branches, the file created
++    /// in the index will contain the "ours" side of any conflicting region.
++    /// The index will not record a conflict.
++    Ours,
++    /// When a region of a file is changed in both branches, the file created
++    /// in the index will contain the "theirs" side of any conflicting region.
++    /// The index will not record a conflict.
++    Theirs,
++    /// When a region of a file is changed in both branches, the file created
++    /// in the index will contain each unique line from each side, which has
++    /// the result of combining both files. The index will not record a conflict.
++    Union,
++}
++
++bitflags! {
++    /// Orderings that may be specified for Revwalk iteration.
++    pub struct Sort: u32 {
++        /// Sort the repository contents in no particular ordering.
++        ///
++        /// This sorting is arbitrary, implementation-specific, and subject to
++        /// change at any time. This is the default sorting for new walkers.
++        const NONE = raw::GIT_SORT_NONE as u32;
++
++        /// Sort the repository contents in topological order (parents before
++        /// children).
++        ///
++        /// This sorting mode can be combined with time sorting.
++        const TOPOLOGICAL = raw::GIT_SORT_TOPOLOGICAL as u32;
++
++        /// Sort the repository contents by commit time.
++ /// ++ /// This sorting mode can be combined with topological sorting. ++ const TIME = raw::GIT_SORT_TIME as u32; ++ ++ /// Iterate through the repository contents in reverse order. ++ /// ++ /// This sorting mode can be combined with any others. ++ const REVERSE = raw::GIT_SORT_REVERSE as u32; ++ } ++} ++ ++impl Sort { ++ is_bit_set!(is_none, Sort::NONE); ++ is_bit_set!(is_topological, Sort::TOPOLOGICAL); ++ is_bit_set!(is_time, Sort::TIME); ++ is_bit_set!(is_reverse, Sort::REVERSE); ++} ++ ++bitflags! { ++ /// Types of credentials that can be requested by a credential callback. ++ pub struct CredentialType: u32 { ++ #[allow(missing_docs)] ++ const USER_PASS_PLAINTEXT = raw::GIT_CREDTYPE_USERPASS_PLAINTEXT as u32; ++ #[allow(missing_docs)] ++ const SSH_KEY = raw::GIT_CREDTYPE_SSH_KEY as u32; ++ #[allow(missing_docs)] ++ const SSH_MEMORY = raw::GIT_CREDTYPE_SSH_MEMORY as u32; ++ #[allow(missing_docs)] ++ const SSH_CUSTOM = raw::GIT_CREDTYPE_SSH_CUSTOM as u32; ++ #[allow(missing_docs)] ++ const DEFAULT = raw::GIT_CREDTYPE_DEFAULT as u32; ++ #[allow(missing_docs)] ++ const SSH_INTERACTIVE = raw::GIT_CREDTYPE_SSH_INTERACTIVE as u32; ++ #[allow(missing_docs)] ++ const USERNAME = raw::GIT_CREDTYPE_USERNAME as u32; ++ } ++} ++ ++impl CredentialType { ++ is_bit_set!(is_user_pass_plaintext, CredentialType::USER_PASS_PLAINTEXT); ++ is_bit_set!(is_ssh_key, CredentialType::SSH_KEY); ++ is_bit_set!(is_ssh_memory, CredentialType::SSH_MEMORY); ++ is_bit_set!(is_ssh_custom, CredentialType::SSH_CUSTOM); ++ is_bit_set!(is_default, CredentialType::DEFAULT); ++ is_bit_set!(is_ssh_interactive, CredentialType::SSH_INTERACTIVE); ++ is_bit_set!(is_username, CredentialType::USERNAME); ++} ++ ++impl Default for CredentialType { ++ fn default() -> Self { ++ CredentialType::DEFAULT ++ } ++} ++ ++bitflags! { ++ /// Flags for the `flags` field of an IndexEntry. ++ pub struct IndexEntryFlag: u16 { ++ /// Set when the `extended_flags` field is valid. ++ const EXTENDED = raw::GIT_IDXENTRY_EXTENDED as u16; ++ /// "Assume valid" flag ++ const VALID = raw::GIT_IDXENTRY_VALID as u16; ++ } ++} ++ ++impl IndexEntryFlag { ++ is_bit_set!(is_extended, IndexEntryFlag::EXTENDED); ++ is_bit_set!(is_valid, IndexEntryFlag::VALID); ++} ++ ++bitflags! { ++ /// Flags for the `extended_flags` field of an IndexEntry. 
++ pub struct IndexEntryExtendedFlag: u16 { ++ /// An "intent to add" entry from "git add -N" ++ const INTENT_TO_ADD = raw::GIT_IDXENTRY_INTENT_TO_ADD as u16; ++ /// Skip the associated worktree file, for sparse checkouts ++ const SKIP_WORKTREE = raw::GIT_IDXENTRY_SKIP_WORKTREE as u16; ++ /// Reserved for a future on-disk extended flag ++ const EXTENDED2 = raw::GIT_IDXENTRY_EXTENDED2 as u16; ++ ++ #[allow(missing_docs)] ++ const UPDATE = raw::GIT_IDXENTRY_UPDATE as u16; ++ #[allow(missing_docs)] ++ const REMOVE = raw::GIT_IDXENTRY_REMOVE as u16; ++ #[allow(missing_docs)] ++ const UPTODATE = raw::GIT_IDXENTRY_UPTODATE as u16; ++ #[allow(missing_docs)] ++ const ADDED = raw::GIT_IDXENTRY_ADDED as u16; ++ ++ #[allow(missing_docs)] ++ const HASHED = raw::GIT_IDXENTRY_HASHED as u16; ++ #[allow(missing_docs)] ++ const UNHASHED = raw::GIT_IDXENTRY_UNHASHED as u16; ++ #[allow(missing_docs)] ++ const WT_REMOVE = raw::GIT_IDXENTRY_WT_REMOVE as u16; ++ #[allow(missing_docs)] ++ const CONFLICTED = raw::GIT_IDXENTRY_CONFLICTED as u16; ++ ++ #[allow(missing_docs)] ++ const UNPACKED = raw::GIT_IDXENTRY_UNPACKED as u16; ++ #[allow(missing_docs)] ++ const NEW_SKIP_WORKTREE = raw::GIT_IDXENTRY_NEW_SKIP_WORKTREE as u16; ++ } ++} ++ ++impl IndexEntryExtendedFlag { ++ is_bit_set!(is_intent_to_add, IndexEntryExtendedFlag::INTENT_TO_ADD); ++ is_bit_set!(is_skip_worktree, IndexEntryExtendedFlag::SKIP_WORKTREE); ++ is_bit_set!(is_extended2, IndexEntryExtendedFlag::EXTENDED2); ++ is_bit_set!(is_update, IndexEntryExtendedFlag::UPDATE); ++ is_bit_set!(is_remove, IndexEntryExtendedFlag::REMOVE); ++ is_bit_set!(is_up_to_date, IndexEntryExtendedFlag::UPTODATE); ++ is_bit_set!(is_added, IndexEntryExtendedFlag::ADDED); ++ is_bit_set!(is_hashed, IndexEntryExtendedFlag::HASHED); ++ is_bit_set!(is_unhashed, IndexEntryExtendedFlag::UNHASHED); ++ is_bit_set!(is_wt_remove, IndexEntryExtendedFlag::WT_REMOVE); ++ is_bit_set!(is_conflicted, IndexEntryExtendedFlag::CONFLICTED); ++ is_bit_set!(is_unpacked, IndexEntryExtendedFlag::UNPACKED); ++ is_bit_set!(is_new_skip_worktree, IndexEntryExtendedFlag::NEW_SKIP_WORKTREE); ++} ++ ++bitflags! { ++ /// Flags for APIs that add files matching pathspec ++ pub struct IndexAddOption: u32 { ++ #[allow(missing_docs)] ++ const DEFAULT = raw::GIT_INDEX_ADD_DEFAULT as u32; ++ #[allow(missing_docs)] ++ const FORCE = raw::GIT_INDEX_ADD_FORCE as u32; ++ #[allow(missing_docs)] ++ const DISABLE_PATHSPEC_MATCH = ++ raw::GIT_INDEX_ADD_DISABLE_PATHSPEC_MATCH as u32; ++ #[allow(missing_docs)] ++ const CHECK_PATHSPEC = raw::GIT_INDEX_ADD_CHECK_PATHSPEC as u32; ++ } ++} ++ ++impl IndexAddOption { ++ is_bit_set!(is_default, IndexAddOption::DEFAULT); ++ is_bit_set!(is_force, IndexAddOption::FORCE); ++ is_bit_set!(is_disable_pathspec_match, IndexAddOption::DISABLE_PATHSPEC_MATCH); ++ is_bit_set!(is_check_pathspec, IndexAddOption::CHECK_PATHSPEC); ++} ++ ++impl Default for IndexAddOption { ++ fn default() -> Self { ++ IndexAddOption::DEFAULT ++ } ++} ++ ++bitflags! { ++ /// Flags for `Repository::open_ext` ++ pub struct RepositoryOpenFlags: u32 { ++ /// Only open the specified path; don't walk upward searching. ++ const NO_SEARCH = raw::GIT_REPOSITORY_OPEN_NO_SEARCH as u32; ++ /// Search across filesystem boundaries. ++ const CROSS_FS = raw::GIT_REPOSITORY_OPEN_CROSS_FS as u32; ++ /// Force opening as bare repository, and defer loading its config. ++ const BARE = raw::GIT_REPOSITORY_OPEN_BARE as u32; ++ /// Don't try appending `/.git` to the specified repository path. 
++ const NO_DOTGIT = raw::GIT_REPOSITORY_OPEN_NO_DOTGIT as u32; ++ /// Respect environment variables like `$GIT_DIR`. ++ const FROM_ENV = raw::GIT_REPOSITORY_OPEN_FROM_ENV as u32; ++ } ++} ++ ++impl RepositoryOpenFlags { ++ is_bit_set!(is_no_search, RepositoryOpenFlags::NO_SEARCH); ++ is_bit_set!(is_cross_fs, RepositoryOpenFlags::CROSS_FS); ++ is_bit_set!(is_bare, RepositoryOpenFlags::BARE); ++ is_bit_set!(is_no_dotgit, RepositoryOpenFlags::NO_DOTGIT); ++ is_bit_set!(is_from_env, RepositoryOpenFlags::FROM_ENV); ++} ++ ++bitflags! { ++ /// Flags for the return value of `Repository::revparse` ++ pub struct RevparseMode: u32 { ++ /// The spec targeted a single object ++ const SINGLE = raw::GIT_REVPARSE_SINGLE as u32; ++ /// The spec targeted a range of commits ++ const RANGE = raw::GIT_REVPARSE_RANGE as u32; ++ /// The spec used the `...` operator, which invokes special semantics. ++ const MERGE_BASE = raw::GIT_REVPARSE_MERGE_BASE as u32; ++ } ++} ++ ++impl RevparseMode { ++ is_bit_set!(is_no_single, RevparseMode::SINGLE); ++ is_bit_set!(is_range, RevparseMode::RANGE); ++ is_bit_set!(is_merge_base, RevparseMode::MERGE_BASE); ++} ++ ++bitflags! { ++ /// The results of `merge_analysis` indicating the merge opportunities. ++ pub struct MergeAnalysis: u32 { ++ /// No merge is possible. ++ const ANALYSIS_NONE = raw::GIT_MERGE_ANALYSIS_NONE as u32; ++ /// A "normal" merge; both HEAD and the given merge input have diverged ++ /// from their common ancestor. The divergent commits must be merged. ++ const ANALYSIS_NORMAL = raw::GIT_MERGE_ANALYSIS_NORMAL as u32; ++ /// All given merge inputs are reachable from HEAD, meaning the ++ /// repository is up-to-date and no merge needs to be performed. ++ const ANALYSIS_UP_TO_DATE = raw::GIT_MERGE_ANALYSIS_UP_TO_DATE as u32; ++ /// The given merge input is a fast-forward from HEAD and no merge ++ /// needs to be performed. Instead, the client can check out the ++ /// given merge input. ++ const ANALYSIS_FASTFORWARD = raw::GIT_MERGE_ANALYSIS_FASTFORWARD as u32; ++ /// The HEAD of the current repository is "unborn" and does not point to ++ /// a valid commit. No merge can be performed, but the caller may wish ++ /// to simply set HEAD to the target commit(s). ++ const ANALYSIS_UNBORN = raw::GIT_MERGE_ANALYSIS_UNBORN as u32; ++ } ++} ++ ++impl MergeAnalysis { ++ is_bit_set!(is_none, MergeAnalysis::ANALYSIS_NONE); ++ is_bit_set!(is_normal, MergeAnalysis::ANALYSIS_NORMAL); ++ is_bit_set!(is_up_to_date, MergeAnalysis::ANALYSIS_UP_TO_DATE); ++ is_bit_set!(is_fast_forward, MergeAnalysis::ANALYSIS_FASTFORWARD); ++ is_bit_set!(is_unborn, MergeAnalysis::ANALYSIS_UNBORN); ++} ++ ++bitflags! { ++ /// The user's stated preference for merges. ++ pub struct MergePreference: u32 { ++ /// No configuration was found that suggests a preferred behavior for ++ /// merge. ++ const NONE = raw::GIT_MERGE_PREFERENCE_NONE as u32; ++ /// There is a `merge.ff=false` configuration setting, suggesting that ++ /// the user does not want to allow a fast-forward merge. ++ const NO_FAST_FORWARD = raw::GIT_MERGE_PREFERENCE_NO_FASTFORWARD as u32; ++ /// There is a `merge.ff=only` configuration setting, suggesting that ++ /// the user only wants fast-forward merges. 
++        const FASTFORWARD_ONLY = raw::GIT_MERGE_PREFERENCE_FASTFORWARD_ONLY as u32;
++    }
++}
++
++impl MergePreference {
++    is_bit_set!(is_none, MergePreference::NONE);
++    is_bit_set!(is_no_fast_forward, MergePreference::NO_FAST_FORWARD);
++    is_bit_set!(is_fastforward_only, MergePreference::FASTFORWARD_ONLY);
++}
++
++#[cfg(test)] #[macro_use] mod test;
++#[macro_use] mod panic;
++mod call;
++mod util;
++
++pub mod build;
++pub mod cert;
++pub mod string_array;
++pub mod oid_array;
++pub mod transport;
++
++mod blame;
++mod blob;
++mod branch;
++mod buf;
++mod commit;
++mod config;
++mod cred;
++mod describe;
++mod diff;
++mod error;
++mod index;
++mod merge;
++mod message;
++mod note;
++mod object;
++mod odb;
++mod oid;
++mod packbuilder;
++mod pathspec;
++mod patch;
++mod proxy_options;
++mod reference;
++mod reflog;
++mod refspec;
++mod remote;
++mod remote_callbacks;
++mod repo;
++mod revspec;
++mod revwalk;
++mod signature;
++mod status;
++mod submodule;
++mod stash;
++mod tag;
++mod time;
++mod tree;
++mod treebuilder;
++
++fn init() {
++    static INIT: Once = ONCE_INIT;
++
++    INIT.call_once(|| {
++        openssl_env_init();
++    });
++
++    raw::init();
++}
++
++#[cfg(all(unix, not(target_os = "macos"), not(target_os = "ios"), feature = "https"))]
++fn openssl_env_init() {
++    extern crate openssl_probe;
++
++    // Currently, libgit2 leverages OpenSSL for SSL support when cloning
++    // repositories over HTTPS. This means that we're picking up an OpenSSL
++    // dependency on non-Windows platforms (Windows has its own HTTPS
++    // subsystem). As a result, we need to link to OpenSSL.
++    //
++    // Now actually *linking* to OpenSSL isn't so hard. We just need to make
++    // sure to use pkg-config to discover any relevant system dependencies for
++    // differences between distributions like CentOS and Ubuntu. The actual
++    // trickiness comes about when we start *distributing* the resulting
++    // binaries. Currently Cargo is distributed in binary form as nightlies,
++    // which means we're distributing a binary with OpenSSL linked in.
++    //
++    // For historical reasons, the Linux nightly builder is running a CentOS
++    // distribution in order to have as much ABI compatibility with other
++    // distributions as possible. Sadly, however, this compatibility does not
++    // extend to OpenSSL. Currently OpenSSL has two major versions, 0.9 and 1.0,
++    // which are incompatible (many ABI differences). The CentOS builder we
++    // build on has version 1.0, as do most distributions today. Some still have
++    // 0.9, however. This means that if we are to distribute the binaries built
++    // by the CentOS machine, we would only be compatible with OpenSSL 1.0 and
++    // we would fail to run (a dynamic linker error at runtime) on systems with
++    // only 0.9.8 installed (hopefully).
++    //
++    // But wait, the plot thickens! Apparently CentOS has dubbed their OpenSSL
++    // library as `libssl.so.10`, notably the `10` is included at the end. On
++    // the other hand Ubuntu, for example, only distributes `libssl.so`. This
++    // means that the binaries created at CentOS are hard-wired to probe for a
++    // file called `libssl.so.10` at runtime (using the LD_LIBRARY_PATH), which
++    // will not be found on Ubuntu. The conclusion of this is that binaries
++    // built on CentOS cannot be distributed to Ubuntu and run successfully.
++    //
++    // There are a number of sneaky things we could do, including, but not
++    // limited to:
++    //
++    // 1. Create a shim program which runs "just before" cargo runs. The
++    //    responsibility of this shim program would be to locate `libssl.so`,
++    //    whatever it's called, on the current system, make sure there's a
++    //    symlink *somewhere* called `libssl.so.10`, and then set up
++    //    LD_LIBRARY_PATH and run the actual cargo.
++    //
++    //    This approach definitely seems unconventional, and is borderline
++    //    overkill for this problem. It's also dubious if we can find a
++    //    libssl.so reliably on the target system.
++    //
++    // 2. Somehow re-work the CentOS installation so that the linked-against
++    //    library is called libssl.so instead of libssl.so.10
++    //
++    //    The problem with this approach is that systems with 0.9 installed will
++    //    start to silently fail, due to also having libraries called libssl.so
++    //    (probably symlinked under a more appropriate version).
++    //
++    // 3. Compile Cargo against both OpenSSL 1.0 *and* OpenSSL 0.9, and
++    //    distribute both. Also make sure that the linked-against name of the
++    //    library is `libssl.so`. At runtime we determine which version is
++    //    installed, and we then run the appropriate binary.
++    //
++    //    This approach clearly has drawbacks in terms of infrastructure and
++    //    feasibility.
++    //
++    // 4. Build a nightly of Cargo for each distribution we'd like to support.
++    //    You would then pick the appropriate Cargo nightly to install locally.
++    //
++    // So, with all this in mind, the decision was made to *statically* link
++    // OpenSSL. This solves any problem of relying on a downstream OpenSSL
++    // version being available. This does, however, open a can of worms related
++    // to security issues. It's generally a good idea to dynamically link
++    // OpenSSL as you'll get security updates over time without having to do
++    // anything (the system administrator will update the local openssl
++    // package). By statically linking, we're forfeiting this feature.
++    //
++    // The conclusion was that it is likely appropriate for the Cargo nightlies
++    // to statically link OpenSSL, while highly encouraging distributions and
++    // packagers of Cargo to dynamically link OpenSSL. Packagers are targeting
++    // one system and are distributing to only that system, so none of the
++    // problems mentioned above would arise.
++    //
++    // In order to support this, a new package was made: openssl-static-sys.
++    // This package currently performs a fairly simple task:
++    //
++    // 1. Run pkg-config to discover where openssl is installed.
++    // 2. If openssl is installed in a nonstandard location, *and* static copies
++    //    of the libraries are available, copy them to $OUT_DIR.
++    //
++    // This library will bring libssl.a and libcrypto.a into the local build,
++    // allowing them to be picked up by this crate. This allows us to configure
++    // our own buildbots to have pkg-config point to these local pre-built
++    // copies of a static OpenSSL (with very few dependencies) while allowing
++    // most other builds of Cargo to naturally dynamically link OpenSSL.
++    //
++    // So in summary, if you're with me so far, we've statically linked OpenSSL
++    // to the Cargo binary (or any binary, for that matter) and we're ready to
++    // distribute it to *all* Linux distributions. Remember that our original
++    // intent for openssl was for HTTPS support, which implies that we need some
++    // form of CA certificate store to validate certificates. This is normally
++    // installed in a standard system location.
++    //
++    // Unfortunately, as one might imagine, OpenSSL is configured for where this
++    // standard location is at *build time*, but it often varies widely
++    // per-system. Consequently, it was discovered that OpenSSL will respect the
++    // SSL_CERT_FILE and SSL_CERT_DIR environment variables in order to assist
++    // in discovering the location of this file (hurray!).
++    //
++    // So, finally getting to the point, this function solely exists to support
++    // our static builds of OpenSSL by probing for the "standard system
++    // location" of certificates and setting the relevant environment variables
++    // to point to them.
++    //
++    // Ah, and as a final note, this is only a problem on Linux, not on OS X. On
++    // OS X the OpenSSL binaries are stable enough that we can just rely on
++    // dynamic linkage (plus they have some weird modifications to OpenSSL which
++    // mean we wouldn't want to link statically).
++    openssl_probe::init_ssl_cert_env_vars();
++}
++
++#[cfg(any(windows, target_os = "macos", target_os = "ios", not(feature = "https")))]
++fn openssl_env_init() {}
++
++unsafe fn opt_bytes<'a, T>(_anchor: &'a T,
++                           c: *const libc::c_char) -> Option<&'a [u8]> {
++    if c.is_null() {
++        None
++    } else {
++        Some(CStr::from_ptr(c).to_bytes())
++    }
++}
++
++fn opt_cstr<T: IntoCString>(o: Option<T>) -> Result<Option<CString>, Error> {
++    match o {
++        Some(s) => s.into_c_string().map(Some),
++        None => Ok(None)
++    }
++}
++
++impl ObjectType {
++    /// Convert an object type to its string representation.
++    pub fn str(&self) -> &'static str {
++        unsafe {
++            let ptr = call!(raw::git_object_type2string(*self)) as *const _;
++            let data = CStr::from_ptr(ptr).to_bytes();
++            str::from_utf8(data).unwrap()
++        }
++    }
++
++    /// Determine if the given git_otype is a valid loose object type.
++    pub fn is_loose(&self) -> bool {
++        unsafe { (call!(raw::git_object_typeisloose(*self)) == 1) }
++    }
++
++    /// Convert a raw git_otype to an ObjectType
++    pub fn from_raw(raw: raw::git_otype) -> Option<ObjectType> {
++        match raw {
++            raw::GIT_OBJ_ANY => Some(ObjectType::Any),
++            raw::GIT_OBJ_COMMIT => Some(ObjectType::Commit),
++            raw::GIT_OBJ_TREE => Some(ObjectType::Tree),
++            raw::GIT_OBJ_BLOB => Some(ObjectType::Blob),
++            raw::GIT_OBJ_TAG => Some(ObjectType::Tag),
++            _ => None,
++        }
++    }
++
++    /// Convert this kind into its raw representation
++    pub fn raw(&self) -> raw::git_otype {
++        call::convert(self)
++    }
++
++    /// Convert a string object type representation to its object type.
++    pub fn from_str(s: &str) -> Option<ObjectType> {
++        let raw = unsafe { call!(raw::git_object_string2type(CString::new(s).unwrap())) };
++        ObjectType::from_raw(raw)
++    }
++}
++
++impl fmt::Display for ObjectType {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        self.str().fmt(f)
++    }
++}
++
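++// [Editorial sketch] What `openssl_env_init` accomplishes above, written out
++// by hand. This is not the openssl-probe implementation; the candidate paths
++// are illustrative assumptions:
++//
++//     use std::env;
++//     use std::path::Path;
++//
++//     fn probe_ssl_certs() {
++//         let files = ["/etc/ssl/certs/ca-certificates.crt",
++//                      "/etc/pki/tls/certs/ca-bundle.crt"];
++//         if env::var_os("SSL_CERT_FILE").is_none() {
++//             if let Some(f) = files.iter().find(|p| Path::new(p).exists()) {
++//                 env::set_var("SSL_CERT_FILE", f);
++//             }
++//         }
++//         if env::var_os("SSL_CERT_DIR").is_none() && Path::new("/etc/ssl/certs").is_dir() {
++//             env::set_var("SSL_CERT_DIR", "/etc/ssl/certs");
++//         }
++//     }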
++impl ReferenceType {
++    /// Convert a reference type to its string representation.
++    pub fn str(&self) -> &'static str {
++        match self {
++            &ReferenceType::Oid => "oid",
++            &ReferenceType::Symbolic => "symbolic",
++        }
++    }
++
++    /// Convert a raw git_ref_t to a ReferenceType.
++    pub fn from_raw(raw: raw::git_ref_t) -> Option<ReferenceType> {
++        match raw {
++            raw::GIT_REF_OID => Some(ReferenceType::Oid),
++            raw::GIT_REF_SYMBOLIC => Some(ReferenceType::Symbolic),
++            _ => None,
++        }
++    }
++}
++
++impl fmt::Display for ReferenceType {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        self.str().fmt(f)
++    }
++}
++
++impl ConfigLevel {
++    /// Converts a raw configuration level to a ConfigLevel
++    pub fn from_raw(raw: raw::git_config_level_t) -> ConfigLevel {
++        match raw {
++            raw::GIT_CONFIG_LEVEL_PROGRAMDATA => ConfigLevel::ProgramData,
++            raw::GIT_CONFIG_LEVEL_SYSTEM => ConfigLevel::System,
++            raw::GIT_CONFIG_LEVEL_XDG => ConfigLevel::XDG,
++            raw::GIT_CONFIG_LEVEL_GLOBAL => ConfigLevel::Global,
++            raw::GIT_CONFIG_LEVEL_LOCAL => ConfigLevel::Local,
++            raw::GIT_CONFIG_LEVEL_APP => ConfigLevel::App,
++            raw::GIT_CONFIG_HIGHEST_LEVEL => ConfigLevel::Highest,
++            n => panic!("unknown config level: {}", n),
++        }
++    }
++}
++
++bitflags! {
++    /// Status flags for a single file
++    ///
++    /// A combination of these values will be returned to indicate the status of
++    /// a file. Status compares the working directory, the index, and the
++    /// current HEAD of the repository. The `STATUS_INDEX_*` set of flags
++    /// represents the status of the file in the index relative to the HEAD, and
++    /// the `STATUS_WT_*` set of flags represents the status of the file in the
++    /// working directory relative to the index.
++    pub struct Status: u32 {
++        #[allow(missing_docs)]
++        const CURRENT = raw::GIT_STATUS_CURRENT as u32;
++
++        #[allow(missing_docs)]
++        const INDEX_NEW = raw::GIT_STATUS_INDEX_NEW as u32;
++        #[allow(missing_docs)]
++        const INDEX_MODIFIED = raw::GIT_STATUS_INDEX_MODIFIED as u32;
++        #[allow(missing_docs)]
++        const INDEX_DELETED = raw::GIT_STATUS_INDEX_DELETED as u32;
++        #[allow(missing_docs)]
++        const INDEX_RENAMED = raw::GIT_STATUS_INDEX_RENAMED as u32;
++        #[allow(missing_docs)]
++        const INDEX_TYPECHANGE = raw::GIT_STATUS_INDEX_TYPECHANGE as u32;
++
++        #[allow(missing_docs)]
++        const WT_NEW = raw::GIT_STATUS_WT_NEW as u32;
++        #[allow(missing_docs)]
++        const WT_MODIFIED = raw::GIT_STATUS_WT_MODIFIED as u32;
++        #[allow(missing_docs)]
++        const WT_DELETED = raw::GIT_STATUS_WT_DELETED as u32;
++        #[allow(missing_docs)]
++        const WT_TYPECHANGE = raw::GIT_STATUS_WT_TYPECHANGE as u32;
++        #[allow(missing_docs)]
++        const WT_RENAMED = raw::GIT_STATUS_WT_RENAMED as u32;
++
++        #[allow(missing_docs)]
++        const IGNORED = raw::GIT_STATUS_IGNORED as u32;
++        #[allow(missing_docs)]
++        const CONFLICTED = raw::GIT_STATUS_CONFLICTED as u32;
++    }
++}
++
++impl Status {
++    is_bit_set!(is_index_new, Status::INDEX_NEW);
++    is_bit_set!(is_index_modified, Status::INDEX_MODIFIED);
++    is_bit_set!(is_index_deleted, Status::INDEX_DELETED);
++    is_bit_set!(is_index_renamed, Status::INDEX_RENAMED);
++    is_bit_set!(is_index_typechange, Status::INDEX_TYPECHANGE);
++    is_bit_set!(is_wt_new, Status::WT_NEW);
++    is_bit_set!(is_wt_modified, Status::WT_MODIFIED);
++    is_bit_set!(is_wt_deleted, Status::WT_DELETED);
++    is_bit_set!(is_wt_typechange, Status::WT_TYPECHANGE);
++    is_bit_set!(is_wt_renamed, Status::WT_RENAMED);
++    is_bit_set!(is_ignored, Status::IGNORED);
++    is_bit_set!(is_conflicted, Status::CONFLICTED);
++}
++
++bitflags!
{ ++ /// Mode options for RepositoryInitOptions ++ pub struct RepositoryInitMode: u32 { ++ /// Use permissions configured by umask - the default ++ const SHARED_UMASK = raw::GIT_REPOSITORY_INIT_SHARED_UMASK as u32; ++ /// Use `--shared=group` behavior, chmod'ing the new repo to be ++ /// group writable and \"g+sx\" for sticky group assignment ++ const SHARED_GROUP = raw::GIT_REPOSITORY_INIT_SHARED_GROUP as u32; ++ /// Use `--shared=all` behavior, adding world readability. ++ const SHARED_ALL = raw::GIT_REPOSITORY_INIT_SHARED_ALL as u32; ++ } ++} ++ ++impl RepositoryInitMode { ++ is_bit_set!(is_shared_umask, RepositoryInitMode::SHARED_UMASK); ++ is_bit_set!(is_shared_group, RepositoryInitMode::SHARED_GROUP); ++ is_bit_set!(is_shared_all, RepositoryInitMode::SHARED_ALL); ++} ++ ++/// What type of change is described by a `DiffDelta`? ++#[derive(Copy, Clone, Debug, PartialEq, Eq)] ++pub enum Delta { ++ /// No changes ++ Unmodified, ++ /// Entry does not exist in old version ++ Added, ++ /// Entry does not exist in new version ++ Deleted, ++ /// Entry content changed between old and new ++ Modified, ++ /// Entry was renamed between old and new ++ Renamed, ++ /// Entry was copied from another old entry ++ Copied, ++ /// Entry is ignored item in workdir ++ Ignored, ++ /// Entry is untracked item in workdir ++ Untracked, ++ /// Type of entry changed between old and new ++ Typechange, ++ /// Entry is unreadable ++ Unreadable, ++ /// Entry in the index is conflicted ++ Conflicted, ++} ++ ++bitflags! { ++ /// Return codes for submodule status. ++ /// ++ /// A combination of these flags will be returned to describe the status of a ++ /// submodule. Depending on the "ignore" property of the submodule, some of ++ /// the flags may never be returned because they indicate changes that are ++ /// supposed to be ignored. ++ /// ++ /// Submodule info is contained in 4 places: the HEAD tree, the index, config ++ /// files (both .git/config and .gitmodules), and the working directory. Any ++ /// or all of those places might be missing information about the submodule ++ /// depending on what state the repo is in. We consider all four places to ++ /// build the combination of status flags. ++ /// ++ /// There are four values that are not really status, but give basic info ++ /// about what sources of submodule data are available. These will be ++ /// returned even if ignore is set to "ALL". ++ /// ++ /// * IN_HEAD - superproject head contains submodule ++ /// * IN_INDEX - superproject index contains submodule ++ /// * IN_CONFIG - superproject gitmodules has submodule ++ /// * IN_WD - superproject workdir has submodule ++ /// ++ /// The following values will be returned so long as ignore is not "ALL". ++ /// ++ /// * INDEX_ADDED - in index, not in head ++ /// * INDEX_DELETED - in head, not in index ++ /// * INDEX_MODIFIED - index and head don't match ++ /// * WD_UNINITIALIZED - workdir contains empty directory ++ /// * WD_ADDED - in workdir, not index ++ /// * WD_DELETED - in index, not workdir ++ /// * WD_MODIFIED - index and workdir head don't match ++ /// ++ /// The following can only be returned if ignore is "NONE" or "UNTRACKED". ++ /// ++ /// * WD_INDEX_MODIFIED - submodule workdir index is dirty ++ /// * WD_WD_MODIFIED - submodule workdir has modified files ++ /// ++ /// Lastly, the following will only be returned for ignore "NONE". 
++    ///
++    /// * WD_UNTRACKED - wd contains untracked files
++    pub struct SubmoduleStatus: u32 {
++        #[allow(missing_docs)]
++        const IN_HEAD = raw::GIT_SUBMODULE_STATUS_IN_HEAD as u32;
++        #[allow(missing_docs)]
++        const IN_INDEX = raw::GIT_SUBMODULE_STATUS_IN_INDEX as u32;
++        #[allow(missing_docs)]
++        const IN_CONFIG = raw::GIT_SUBMODULE_STATUS_IN_CONFIG as u32;
++        #[allow(missing_docs)]
++        const IN_WD = raw::GIT_SUBMODULE_STATUS_IN_WD as u32;
++        #[allow(missing_docs)]
++        const INDEX_ADDED = raw::GIT_SUBMODULE_STATUS_INDEX_ADDED as u32;
++        #[allow(missing_docs)]
++        const INDEX_DELETED = raw::GIT_SUBMODULE_STATUS_INDEX_DELETED as u32;
++        #[allow(missing_docs)]
++        const INDEX_MODIFIED = raw::GIT_SUBMODULE_STATUS_INDEX_MODIFIED as u32;
++        #[allow(missing_docs)]
++        const WD_UNINITIALIZED =
++            raw::GIT_SUBMODULE_STATUS_WD_UNINITIALIZED as u32;
++        #[allow(missing_docs)]
++        const WD_ADDED = raw::GIT_SUBMODULE_STATUS_WD_ADDED as u32;
++        #[allow(missing_docs)]
++        const WD_DELETED = raw::GIT_SUBMODULE_STATUS_WD_DELETED as u32;
++        #[allow(missing_docs)]
++        const WD_MODIFIED = raw::GIT_SUBMODULE_STATUS_WD_MODIFIED as u32;
++        #[allow(missing_docs)]
++        const WD_INDEX_MODIFIED =
++            raw::GIT_SUBMODULE_STATUS_WD_INDEX_MODIFIED as u32;
++        #[allow(missing_docs)]
++        const WD_WD_MODIFIED = raw::GIT_SUBMODULE_STATUS_WD_WD_MODIFIED as u32;
++        #[allow(missing_docs)]
++        const WD_UNTRACKED = raw::GIT_SUBMODULE_STATUS_WD_UNTRACKED as u32;
++    }
++}
++
++impl SubmoduleStatus {
++    is_bit_set!(is_in_head, SubmoduleStatus::IN_HEAD);
++    is_bit_set!(is_in_index, SubmoduleStatus::IN_INDEX);
++    is_bit_set!(is_in_config, SubmoduleStatus::IN_CONFIG);
++    is_bit_set!(is_in_wd, SubmoduleStatus::IN_WD);
++    is_bit_set!(is_index_added, SubmoduleStatus::INDEX_ADDED);
++    is_bit_set!(is_index_deleted, SubmoduleStatus::INDEX_DELETED);
++    is_bit_set!(is_index_modified, SubmoduleStatus::INDEX_MODIFIED);
++    is_bit_set!(is_wd_uninitialized, SubmoduleStatus::WD_UNINITIALIZED);
++    is_bit_set!(is_wd_added, SubmoduleStatus::WD_ADDED);
++    is_bit_set!(is_wd_deleted, SubmoduleStatus::WD_DELETED);
++    is_bit_set!(is_wd_modified, SubmoduleStatus::WD_MODIFIED);
++    is_bit_set!(is_wd_wd_modified, SubmoduleStatus::WD_WD_MODIFIED);
++    is_bit_set!(is_wd_untracked, SubmoduleStatus::WD_UNTRACKED);
++}
++
++/// Submodule ignore values
++///
++/// These values represent settings for the `submodule.$name.ignore`
++/// configuration value which says how deeply to look at the working
++/// directory when getting the submodule status.
++pub enum SubmoduleIgnore {
++    /// Use the submodule's configuration
++    Unspecified,
++    /// Any change or untracked file is considered dirty
++    None,
++    /// Only dirty if tracked files have changed
++    Untracked,
++    /// Only dirty if HEAD has moved
++    Dirty,
++    /// Never dirty
++    All,
++}
++
++bitflags! {
++    /// Flags controlling the behavior of pathspec matching.
++    pub struct PathspecFlags: u32 {
++        /// Use the default pathspec matching configuration.
++        const DEFAULT = raw::GIT_PATHSPEC_DEFAULT as u32;
++        /// Force matching to ignore case, otherwise matching will use native
++        /// case sensitivity of the platform filesystem.
++        const IGNORE_CASE = raw::GIT_PATHSPEC_IGNORE_CASE as u32;
++        /// Force case sensitive matches, otherwise match will use the native
++        /// case sensitivity of the platform filesystem.
++        const USE_CASE = raw::GIT_PATHSPEC_USE_CASE as u32;
++        /// Disable glob patterns and just use simple string comparison for
++        /// matching.
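++        // [Editorial sketch] How these flags reach the matching APIs defined
++        // in `pathspec.rs` (assumes `repo: Repository`; error handling
++        // elided):
++        //
++        //     let ps = Pathspec::new(vec!["src/*.rs"])?;
++        //     let matches = ps.match_workdir(&repo, PathspecFlags::IGNORE_CASE |
++        //                                           PathspecFlags::FIND_FAILURES)?;
++        //     for path in matches.entries() { /* each matched workdir path */ }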
++ const NO_GLOB = raw::GIT_PATHSPEC_NO_GLOB as u32; ++ /// Means that match functions return the error code `NotFound` if no ++ /// matches are found. By default no matches is a success. ++ const NO_MATCH_ERROR = raw::GIT_PATHSPEC_NO_MATCH_ERROR as u32; ++ /// Means that the list returned should track which patterns matched ++ /// which files so that at the end of the match we can identify patterns ++ /// that did not match any files. ++ const FIND_FAILURES = raw::GIT_PATHSPEC_FIND_FAILURES as u32; ++ /// Means that the list returned does not need to keep the actual ++ /// matching filenames. Use this to just test if there were any matches ++ /// at all or in combination with `PATHSPEC_FAILURES` to validate a ++ /// pathspec. ++ const FAILURES_ONLY = raw::GIT_PATHSPEC_FAILURES_ONLY as u32; ++ } ++} ++ ++impl PathspecFlags { ++ is_bit_set!(is_default, PathspecFlags::DEFAULT); ++ is_bit_set!(is_ignore_case, PathspecFlags::IGNORE_CASE); ++ is_bit_set!(is_use_case, PathspecFlags::USE_CASE); ++ is_bit_set!(is_no_glob, PathspecFlags::NO_GLOB); ++ is_bit_set!(is_no_match_error, PathspecFlags::NO_MATCH_ERROR); ++ is_bit_set!(is_find_failures, PathspecFlags::FIND_FAILURES); ++ is_bit_set!(is_failures_only, PathspecFlags::FAILURES_ONLY); ++} ++ ++impl Default for PathspecFlags { ++ fn default() -> Self { ++ PathspecFlags::DEFAULT ++ } ++} ++ ++bitflags! { ++ /// Types of notifications emitted from checkouts. ++ pub struct CheckoutNotificationType: u32 { ++ /// Notification about a conflict. ++ const CONFLICT = raw::GIT_CHECKOUT_NOTIFY_CONFLICT as u32; ++ /// Notification about a dirty file. ++ const DIRTY = raw::GIT_CHECKOUT_NOTIFY_DIRTY as u32; ++ /// Notification about an updated file. ++ const UPDATED = raw::GIT_CHECKOUT_NOTIFY_UPDATED as u32; ++ /// Notification about an untracked file. ++ const UNTRACKED = raw::GIT_CHECKOUT_NOTIFY_UNTRACKED as u32; ++ /// Notification about an ignored file. ++ const IGNORED = raw::GIT_CHECKOUT_NOTIFY_IGNORED as u32; ++ } ++} ++ ++impl CheckoutNotificationType { ++ is_bit_set!(is_conflict, CheckoutNotificationType::CONFLICT); ++ is_bit_set!(is_dirty, CheckoutNotificationType::DIRTY); ++ is_bit_set!(is_updated, CheckoutNotificationType::UPDATED); ++ is_bit_set!(is_untracked, CheckoutNotificationType::UNTRACKED); ++ is_bit_set!(is_ignored, CheckoutNotificationType::IGNORED); ++} ++ ++/// Possible output formats for diff data ++#[derive(Copy, Clone)] ++pub enum DiffFormat { ++ /// full git diff ++ Patch, ++ /// just the headers of the patch ++ PatchHeader, ++ /// like git diff --raw ++ Raw, ++ /// like git diff --name-only ++ NameOnly, ++ /// like git diff --name-status ++ NameStatus, ++} ++ ++bitflags! 
{ ++ /// Formatting options for diff stats ++ pub struct DiffStatsFormat: raw::git_diff_stats_format_t { ++ /// Don't generate any stats ++ const NONE = raw::GIT_DIFF_STATS_NONE; ++ /// Equivalent of `--stat` in git ++ const FULL = raw::GIT_DIFF_STATS_FULL; ++ /// Equivalent of `--shortstat` in git ++ const SHORT = raw::GIT_DIFF_STATS_SHORT; ++ /// Equivalent of `--numstat` in git ++ const NUMBER = raw::GIT_DIFF_STATS_NUMBER; ++ /// Extended header information such as creations, renames and mode ++ /// changes, equivalent of `--summary` in git ++ const INCLUDE_SUMMARY = raw::GIT_DIFF_STATS_INCLUDE_SUMMARY; ++ } ++} ++ ++impl DiffStatsFormat { ++ is_bit_set!(is_none, DiffStatsFormat::NONE); ++ is_bit_set!(is_full, DiffStatsFormat::FULL); ++ is_bit_set!(is_short, DiffStatsFormat::SHORT); ++ is_bit_set!(is_number, DiffStatsFormat::NUMBER); ++ is_bit_set!(is_include_summary, DiffStatsFormat::INCLUDE_SUMMARY); ++} ++ ++/// Automatic tag following options. ++pub enum AutotagOption { ++ /// Use the setting from the remote's configuration ++ Unspecified, ++ /// Ask the server for tags pointing to objects we're already downloading ++ Auto, ++ /// Don't ask for any tags beyond the refspecs ++ None, ++ /// Ask for all the tags ++ All, ++} ++ ++/// Configuration for how pruning is done on a fetch ++pub enum FetchPrune { ++ /// Use the setting from the configuration ++ Unspecified, ++ /// Force pruning on ++ On, ++ /// Force pruning off ++ Off, ++} ++ ++#[allow(missing_docs)] ++#[derive(Debug)] ++pub enum StashApplyProgress { ++ /// None ++ None, ++ /// Loading the stashed data from the object database ++ LoadingStash, ++ /// The stored index is being analyzed ++ AnalyzeIndex, ++ /// The modified files are being analyzed ++ AnalyzeModified, ++ /// The untracked and ignored files are being analyzed ++ AnalyzeUntracked, ++ /// The untracked files are being written to disk ++ CheckoutUntracked, ++ /// The modified files are being written to disk ++ CheckoutModified, ++ /// The stash was applied successfully ++ Done, ++} ++ ++bitflags! { ++ #[allow(missing_docs)] ++ pub struct StashApplyFlags: u32 { ++ #[allow(missing_docs)] ++ const DEFAULT = raw::GIT_STASH_APPLY_DEFAULT as u32; ++ /// Try to reinstate not only the working tree's changes, ++ /// but also the index's changes. ++ const REINSTATE_INDEX = raw::GIT_STASH_APPLY_REINSTATE_INDEX as u32; ++ } ++} ++ ++impl StashApplyFlags { ++ is_bit_set!(is_default, StashApplyFlags::DEFAULT); ++ is_bit_set!(is_reinstate_index, StashApplyFlags::REINSTATE_INDEX); ++} ++ ++impl Default for StashApplyFlags { ++ fn default() -> Self { ++ StashApplyFlags::DEFAULT ++ } ++} ++ ++bitflags! 
{
++    #[allow(missing_docs)]
++    pub struct StashFlags: u32 {
++        #[allow(missing_docs)]
++        const DEFAULT = raw::GIT_STASH_DEFAULT as u32;
++        /// All changes already added to the index are left intact in
++        /// the working directory
++        const KEEP_INDEX = raw::GIT_STASH_KEEP_INDEX as u32;
++        /// All untracked files are also stashed and then cleaned up
++        /// from the working directory
++        const INCLUDE_UNTRACKED = raw::GIT_STASH_INCLUDE_UNTRACKED as u32;
++        /// All ignored files are also stashed and then cleaned up from
++        /// the working directory
++        const INCLUDE_IGNORED = raw::GIT_STASH_INCLUDE_IGNORED as u32;
++    }
++}
++
++impl StashFlags {
++    is_bit_set!(is_default, StashFlags::DEFAULT);
++    is_bit_set!(is_keep_index, StashFlags::KEEP_INDEX);
++    is_bit_set!(is_include_untracked, StashFlags::INCLUDE_UNTRACKED);
++    is_bit_set!(is_include_ignored, StashFlags::INCLUDE_IGNORED);
++}
++
++impl Default for StashFlags {
++    fn default() -> Self {
++        StashFlags::DEFAULT
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use super::ObjectType;
++
++    #[test]
++    fn convert() {
++        assert_eq!(ObjectType::Blob.str(), "blob");
++        assert_eq!(ObjectType::from_str("blob"), Some(ObjectType::Blob));
++        assert!(ObjectType::Blob.is_loose());
++    }
++
++}
diff --cc vendor/git2-0.7.5/src/merge.rs
index 000000000,000000000..15089403e
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/merge.rs
@@@ -1,0 -1,0 +1,160 @@@
++use std::marker;
++use std::mem;
++use libc::c_uint;
++
++use {raw, Oid, Commit, FileFavor};
++use util::Binding;
++use call::Convert;
++
++/// A structure to represent an annotated commit, the input to merge and rebase.
++///
++/// An annotated commit contains information about how it was looked up, which
++/// may be useful for functions like merge or rebase to provide context to the
++/// operation.
++pub struct AnnotatedCommit<'repo> {
++    raw: *mut raw::git_annotated_commit,
++    _marker: marker::PhantomData<Commit<'repo>>,
++}
++
++/// Options to specify when merging.
++pub struct MergeOptions {
++    raw: raw::git_merge_options,
++}
++
++impl<'repo> AnnotatedCommit<'repo> {
++    /// Gets the commit ID that the given git_annotated_commit refers to
++    pub fn id(&self) -> Oid {
++        unsafe { Binding::from_raw(raw::git_annotated_commit_id(self.raw)) }
++    }
++}
++
++impl Default for MergeOptions {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl MergeOptions {
++    /// Creates a default set of merge options.
++    pub fn new() -> MergeOptions {
++        let mut opts = MergeOptions {
++            raw: unsafe { mem::zeroed() },
++        };
++        assert_eq!(unsafe {
++            raw::git_merge_init_options(&mut opts.raw, 1)
++        }, 0);
++        opts
++    }
++
++    /// Detect file renames
++    pub fn find_renames(&mut self, find: bool) -> &mut MergeOptions {
++        if find {
++            self.raw.flags |= raw::GIT_MERGE_FIND_RENAMES;
++        } else {
++            self.raw.flags &= !raw::GIT_MERGE_FIND_RENAMES;
++        }
++        self
++    }
++
++    /// Similarity to consider a file renamed (default 50)
++    pub fn rename_threshold(&mut self, thresh: u32) -> &mut MergeOptions {
++        self.raw.rename_threshold = thresh;
++        self
++    }
++
++    /// Maximum similarity sources to examine for renames (default 200).
++    /// If the number of rename candidates (add / delete pairs) is greater
++    /// than this value, inexact rename detection is aborted. This setting
++    /// overrides the `merge.renameLimit` configuration value.
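++    // [Editorial sketch] The setters on `MergeOptions` are chainable, so a
++    // typical configuration looks like this (`repo` and `theirs` assumed):
++    //
++    //     let mut opts = MergeOptions::new();
++    //     opts.find_renames(true)
++    //         .rename_threshold(60)
++    //         .file_favor(FileFavor::Ours);
++    //     repo.merge(&[&theirs], Some(&mut opts), None)?;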
++ pub fn target_limit(&mut self, limit: u32) -> &mut MergeOptions { ++ self.raw.target_limit = limit as c_uint; ++ self ++ } ++ ++ /// Maximum number of times to merge common ancestors to build a ++ /// virtual merge base when faced with criss-cross merges. When ++ /// this limit is reached, the next ancestor will simply be used ++ /// instead of attempting to merge it. The default is unlimited. ++ pub fn recursion_limit(&mut self, limit: u32) -> &mut MergeOptions { ++ self.raw.recursion_limit = limit as c_uint; ++ self ++ } ++ ++ /// Specify a side to favor for resolving conflicts ++ pub fn file_favor(&mut self, favor: FileFavor) -> &mut MergeOptions { ++ self.raw.file_favor = favor.convert(); ++ self ++ } ++ ++ fn flag(&mut self, opt: raw::git_merge_file_flag_t, val: bool) -> &mut MergeOptions { ++ if val { ++ self.raw.file_flags |= opt; ++ } else { ++ self.raw.file_flags &= !opt; ++ } ++ self ++ } ++ ++ /// Create standard conflicted merge files ++ pub fn standard_style(&mut self, standard: bool) -> &mut MergeOptions { ++ self.flag(raw::GIT_MERGE_FILE_STYLE_MERGE, standard) ++ } ++ ++ /// Create diff3-style file ++ pub fn diff3_style(&mut self, diff3: bool) -> &mut MergeOptions { ++ self.flag(raw::GIT_MERGE_FILE_STYLE_DIFF3, diff3) ++ } ++ ++ /// Condense non-alphanumeric regions for simplified diff file ++ pub fn simplify_alnum(&mut self, simplify: bool) -> &mut MergeOptions { ++ self.flag(raw::GIT_MERGE_FILE_SIMPLIFY_ALNUM, simplify) ++ } ++ ++ /// Ignore all whitespace ++ pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut MergeOptions { ++ self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE, ignore) ++ } ++ ++ /// Ignore changes in amount of whitespace ++ pub fn ignore_whitespace_change(&mut self, ignore: bool) -> &mut MergeOptions { ++ self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE_CHANGE, ignore) ++ } ++ ++ /// Ignore whitespace at end of line ++ pub fn ignore_whitespace_eol(&mut self, ignore: bool) -> &mut MergeOptions { ++ self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE_EOL, ignore) ++ } ++ ++ /// Use the "patience diff" algorithm ++ pub fn patience(&mut self, patience: bool) -> &mut MergeOptions { ++ self.flag(raw::GIT_MERGE_FILE_DIFF_PATIENCE, patience) ++ } ++ ++ /// Take extra time to find minimal diff ++ pub fn minimal(&mut self, minimal: bool) -> &mut MergeOptions { ++ self.flag(raw::GIT_MERGE_FILE_DIFF_MINIMAL, minimal) ++ } ++ ++ /// Acquire a pointer to the underlying raw options. ++ pub unsafe fn raw(&self) -> *const raw::git_merge_options { ++ &self.raw as *const _ ++ } ++} ++ ++impl<'repo> Binding for AnnotatedCommit<'repo> { ++ type Raw = *mut raw::git_annotated_commit; ++ unsafe fn from_raw(raw: *mut raw::git_annotated_commit) ++ -> AnnotatedCommit<'repo> { ++ AnnotatedCommit { ++ raw: raw, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *mut raw::git_annotated_commit { self.raw } ++} ++ ++impl<'repo> Drop for AnnotatedCommit<'repo> { ++ fn drop(&mut self) { ++ unsafe { raw::git_annotated_commit_free(self.raw) } ++ } ++} diff --cc vendor/git2-0.7.5/src/message.rs index 000000000,000000000..96cdd589c new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/message.rs @@@ -1,0 -1,0 +1,52 @@@ ++use std::ffi::CString; ++ ++use libc::{c_char, c_int}; ++ ++use {raw, Buf, Error, IntoCString}; ++use util::Binding; ++ ++/// Clean up a message, removing extraneous whitespace, and ensure that the ++/// message ends with a newline. If `comment_char` is `Some`, also remove comment ++/// lines starting with that character. 
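++// [Editorial sketch] Expected behavior, mirroring the tests at the bottom of
++// this file: runs of blank lines collapse, a trailing newline is ensured, and
++// comment lines are stripped when a comment character is supplied:
++//
++//     assert_eq!(message_prettify("1\n\n\n2", None)?, "1\n\n2\n");
++//     assert_eq!(message_prettify("1\n# c", DEFAULT_COMMENT_CHAR)?, "1\n");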
++pub fn message_prettify<T: IntoCString>(message: T, comment_char: Option<u8>)
++                                        -> Result<String, Error> {
++    _message_prettify(try!(message.into_c_string()), comment_char)
++}
++
++fn _message_prettify(message: CString, comment_char: Option<u8>)
++                     -> Result<String, Error> {
++    let ret = Buf::new();
++    unsafe {
++        try_call!(raw::git_message_prettify(ret.raw(), message,
++                                            comment_char.is_some() as c_int,
++                                            comment_char.unwrap_or(0) as c_char));
++    }
++    Ok(ret.as_str().unwrap().to_string())
++}
++
++/// The default comment character for `message_prettify` ('#')
++pub const DEFAULT_COMMENT_CHAR: Option<u8> = Some(b'#');
++
++#[cfg(test)]
++mod tests {
++    use {message_prettify, DEFAULT_COMMENT_CHAR};
++
++    #[test]
++    fn prettify() {
++        // This does not attempt to duplicate the extensive tests for
++        // git_message_prettify in libgit2, just a few representative values to
++        // make sure the interface works as expected.
++        assert_eq!(message_prettify("1\n\n\n2", None).unwrap(),
++                   "1\n\n2\n");
++        assert_eq!(message_prettify("1\n\n\n2\n\n\n3", None).unwrap(),
++                   "1\n\n2\n\n3\n");
++        assert_eq!(message_prettify("1\n# comment\n# more", None).unwrap(),
++                   "1\n# comment\n# more\n");
++        assert_eq!(message_prettify("1\n# comment\n# more",
++                                    DEFAULT_COMMENT_CHAR).unwrap(),
++                   "1\n");
++        assert_eq!(message_prettify("1\n; comment\n; more",
++                                    Some(b';')).unwrap(),
++                   "1\n");
++    }
++}
diff --cc vendor/git2-0.7.5/src/note.rs
index 000000000,000000000..5295e5988
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/note.rs
@@@ -1,0 -1,0 +1,130 @@@
++use std::marker;
++use std::str;
++
++use {raw, signature, Signature, Oid, Repository, Error};
++use util::Binding;
++
++/// A structure representing a [note][note] in git.
++///
++/// [note]: http://git-scm.com/blog/2010/08/25/notes.html
++pub struct Note<'repo> {
++    raw: *mut raw::git_note,
++
++    // Hmm, the current libgit2 version does not have this inside of it, but
++    // perhaps it's a good idea to keep it around? Can always remove it later I
++    // suppose...
++    _marker: marker::PhantomData<&'repo Repository>,
++}
++
++/// An iterator over all of the notes within a repository.
++pub struct Notes<'repo> {
++    raw: *mut raw::git_note_iterator,
++    _marker: marker::PhantomData<&'repo Repository>,
++}
++
++impl<'repo> Note<'repo> {
++    /// Get the note author
++    pub fn author(&self) -> Signature {
++        unsafe {
++            signature::from_raw_const(self, raw::git_note_author(&*self.raw))
++        }
++    }
++
++    /// Get the note committer
++    pub fn committer(&self) -> Signature {
++        unsafe {
++            signature::from_raw_const(self, raw::git_note_committer(&*self.raw))
++        }
++    }
++
++    /// Get the note message, in bytes.
++    pub fn message_bytes(&self) -> &[u8] {
++        unsafe { ::opt_bytes(self, raw::git_note_message(&*self.raw)).unwrap() }
++    }
++
++    /// Get the note message as a string, returning `None` if it is not UTF-8.
++    pub fn message(&self) -> Option<&str> {
++        str::from_utf8(self.message_bytes()).ok()
++    }
++
++    /// Get the note object's id
++    pub fn id(&self) -> Oid {
++        unsafe { Binding::from_raw(raw::git_note_id(&*self.raw)) }
++    }
++}
++
++impl<'repo> Binding for Note<'repo> {
++    type Raw = *mut raw::git_note;
++    unsafe fn from_raw(raw: *mut raw::git_note) -> Note<'repo> {
++        Note { raw: raw, _marker: marker::PhantomData, }
++    }
++    fn raw(&self) -> *mut raw::git_note { self.raw }
++}
++
++impl<'repo> ::std::fmt::Debug for Note<'repo> {
++    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
++        f.debug_struct("Note").field("id", &self.id()).finish()
++    }
++}
++
++impl<'repo> Drop for Note<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_note_free(self.raw); }
++    }
++}
++
++impl<'repo> Binding for Notes<'repo> {
++    type Raw = *mut raw::git_note_iterator;
++    unsafe fn from_raw(raw: *mut raw::git_note_iterator) -> Notes<'repo> {
++        Notes { raw: raw, _marker: marker::PhantomData, }
++    }
++    fn raw(&self) -> *mut raw::git_note_iterator { self.raw }
++}
++
++impl<'repo> Iterator for Notes<'repo> {
++    type Item = Result<(Oid, Oid), Error>;
++    fn next(&mut self) -> Option<Result<(Oid, Oid), Error>> {
++        let mut note_id = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        let mut annotated_id = note_id;
++        unsafe {
++            try_call_iter!(raw::git_note_next(&mut note_id, &mut annotated_id,
++                                              self.raw));
++            Some(Ok((Binding::from_raw(&note_id as *const _),
++                     Binding::from_raw(&annotated_id as *const _))))
++        }
++    }
++}
++
++impl<'repo> Drop for Notes<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_note_iterator_free(self.raw); }
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    #[test]
++    fn smoke() {
++        let (_td, repo) = ::test::repo_init();
++        assert!(repo.notes(None).is_err());
++
++        let sig = repo.signature().unwrap();
++        let head = repo.head().unwrap().target().unwrap();
++        let note = repo.note(&sig, &sig, None, head, "foo", false).unwrap();
++        assert_eq!(repo.notes(None).unwrap().count(), 1);
++
++        let note_obj = repo.find_note(None, head).unwrap();
++        assert_eq!(note_obj.id(), note);
++        assert_eq!(note_obj.message(), Some("foo"));
++
++        let (a, b) = repo.notes(None).unwrap().next().unwrap().unwrap();
++        assert_eq!(a, note);
++        assert_eq!(b, head);
++
++        assert_eq!(repo.note_default_ref().unwrap(), "refs/notes/commits");
++
++        assert_eq!(sig.name(), note_obj.author().name());
++        assert_eq!(sig.name(), note_obj.committer().name());
++        assert!(sig.when() == note_obj.committer().when());
++    }
++}
diff --cc vendor/git2-0.7.5/src/object.rs
index 000000000,000000000..725e4c77f
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/object.rs
@@@ -1,0 -1,0 +1,234 @@@
++use std::marker;
++use std::mem;
++use std::ptr;
++
++use {raw, Oid, ObjectType, Error, Buf, Commit, Tag, Blob, Tree, Repository};
++use {Describe, DescribeOptions};
++use util::Binding;
++
++/// A structure to represent a git [object][1]
++///
++/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects
++pub struct Object<'repo> {
++    raw: *mut raw::git_object,
++    _marker: marker::PhantomData<&'repo Repository>,
++}
++
++impl<'repo> Object<'repo> {
++    /// Get the id (SHA1) of a repository object
++    pub fn id(&self) -> Oid {
++        unsafe {
++            Binding::from_raw(raw::git_object_id(&*self.raw))
++        }
++    }
++
++    /// Get the object type of an object.
++    ///
++    /// If the type is unknown, then `None` is returned.
++    pub fn kind(&self) -> Option<ObjectType> {
++        ObjectType::from_raw(unsafe { raw::git_object_type(&*self.raw) })
++    }
++
++    /// Recursively peel an object until an object of the specified type is met.
++    ///
++    /// If you pass `Any` as the target type, then the object will be
++    /// peeled until the type changes (e.g. a tag will be chased until the
++    /// referenced object is no longer a tag).
++    pub fn peel(&self, kind: ObjectType) -> Result<Object<'repo>, Error> {
++        let mut raw = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_object_peel(&mut raw, &*self.raw(), kind));
++            Ok(Binding::from_raw(raw))
++        }
++    }
++
++    /// Recursively peel an object until a blob is found
++    pub fn peel_to_blob(&self) -> Result<Blob<'repo>, Error> {
++        self.peel(ObjectType::Blob).map(|o| o.cast_or_panic(ObjectType::Blob))
++    }
++
++    /// Recursively peel an object until a commit is found
++    pub fn peel_to_commit(&self) -> Result<Commit<'repo>, Error> {
++        self.peel(ObjectType::Commit).map(|o| o.cast_or_panic(ObjectType::Commit))
++    }
++
++    /// Recursively peel an object until a tag is found
++    pub fn peel_to_tag(&self) -> Result<Tag<'repo>, Error> {
++        self.peel(ObjectType::Tag).map(|o| o.cast_or_panic(ObjectType::Tag))
++    }
++
++    /// Recursively peel an object until a tree is found
++    pub fn peel_to_tree(&self) -> Result<Tree<'repo>, Error> {
++        self.peel(ObjectType::Tree).map(|o| o.cast_or_panic(ObjectType::Tree))
++    }
++
++    /// Get a short abbreviated OID string for the object
++    ///
++    /// This starts at the "core.abbrev" length (default 7 characters) and
++    /// iteratively extends to a longer string if that length is ambiguous. The
++    /// result will be unambiguous (at least until new objects are added to the
++    /// repository).
++    pub fn short_id(&self) -> Result<Buf, Error> {
++        unsafe {
++            let buf = Buf::new();
++            try_call!(raw::git_object_short_id(buf.raw(), &*self.raw()));
++            Ok(buf)
++        }
++    }
++
++    /// Attempt to view this object as a commit.
++    ///
++    /// Returns `None` if the object is not actually a commit.
++    pub fn as_commit(&self) -> Option<&Commit<'repo>> {
++        self.cast(ObjectType::Commit)
++    }
++
++    /// Attempt to consume this object and return a commit.
++    ///
++    /// Returns `Err(self)` if this object is not actually a commit.
++    pub fn into_commit(self) -> Result<Commit<'repo>, Object<'repo>> {
++        self.cast_into(ObjectType::Commit)
++    }
++
++    /// Attempt to view this object as a tag.
++    ///
++    /// Returns `None` if the object is not actually a tag.
++    pub fn as_tag(&self) -> Option<&Tag<'repo>> {
++        self.cast(ObjectType::Tag)
++    }
++
++    /// Attempt to consume this object and return a tag.
++    ///
++    /// Returns `Err(self)` if this object is not actually a tag.
++    pub fn into_tag(self) -> Result<Tag<'repo>, Object<'repo>> {
++        self.cast_into(ObjectType::Tag)
++    }
++
++    /// Attempt to view this object as a tree.
++    ///
++    /// Returns `None` if the object is not actually a tree.
++    pub fn as_tree(&self) -> Option<&Tree<'repo>> {
++        self.cast(ObjectType::Tree)
++    }
++
++    /// Attempt to consume this object and return a tree.
++    ///
++    /// Returns `Err(self)` if this object is not actually a tree.
++    pub fn into_tree(self) -> Result<Tree<'repo>, Object<'repo>> {
++        self.cast_into(ObjectType::Tree)
++    }
++
++    /// Attempt to view this object as a blob.
++    ///
++    /// Returns `None` if the object is not actually a blob.
++    pub fn as_blob(&self) -> Option<&Blob<'repo>> {
++        self.cast(ObjectType::Blob)
++    }
++
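++    // [Editorial sketch] Downcasting in practice (assumes `obj` came from
++    // something like `repo.revparse_single("HEAD")`):
++    //
++    //     let commit = obj.peel_to_commit()?;   // chase tags down to a commit
++    //     match obj.into_commit() {
++    //         Ok(commit) => { /* strongly typed Commit */ }
++    //         Err(obj) => { /* not a commit; the original Object is returned */ }
++    //     }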
++    /// Attempt to consume this object and return a blob.
++    ///
++    /// Returns `Err(self)` if this object is not actually a blob.
++    pub fn into_blob(self) -> Result<Blob<'repo>, Object<'repo>> {
++        self.cast_into(ObjectType::Blob)
++    }
++
++    /// Describes a commit
++    ///
++    /// Performs a describe operation on this commitish object.
++    pub fn describe(&self, opts: &DescribeOptions) -> Result<Describe, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_describe_commit(&mut ret, self.raw, opts.raw()));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    fn cast<T>(&self, kind: ObjectType) -> Option<&T> {
++        assert_eq!(mem::size_of::<Object>(), mem::size_of::<T>());
++        if self.kind() == Some(kind) {
++            unsafe { Some(&*(self as *const _ as *const T)) }
++        } else {
++            None
++        }
++    }
++
++    fn cast_into<T>(self, kind: ObjectType) -> Result<T, Object<'repo>> {
++        assert_eq!(mem::size_of_val(&self), mem::size_of::<T>());
++        if self.kind() == Some(kind) {
++            Ok(unsafe {
++                let other = ptr::read(&self as *const _ as *const T);
++                mem::forget(self);
++                other
++            })
++        } else {
++            Err(self)
++        }
++    }
++}
++
++/// This trait is used to make `cast_or_panic` available inside the crate
++/// without exporting it outside.
++pub trait CastOrPanic {
++    fn cast_or_panic<T>(self, kind: ObjectType) -> T;
++}
++
++impl<'repo> CastOrPanic for Object<'repo> {
++    fn cast_or_panic<T>(self, kind: ObjectType) -> T {
++        assert_eq!(mem::size_of_val(&self), mem::size_of::<T>());
++        if self.kind() == Some(kind) {
++            unsafe {
++                let other = ptr::read(&self as *const _ as *const T);
++                mem::forget(self);
++                other
++            }
++        } else {
++            let buf;
++            let akind = match self.kind() {
++                Some(akind) => akind.str(),
++                None => {
++                    buf = format!("unknown ({})", unsafe { raw::git_object_type(&*self.raw) });
++                    &buf
++                }
++            };
++            panic!("Expected object {} to be {} but it is {}", self.id(), kind.str(), akind)
++        }
++    }
++}
++
++impl<'repo> Clone for Object<'repo> {
++    fn clone(&self) -> Object<'repo> {
++        let mut raw = ptr::null_mut();
++        unsafe {
++            let rc = raw::git_object_dup(&mut raw, self.raw);
++            assert_eq!(rc, 0);
++            Binding::from_raw(raw)
++        }
++    }
++}
++
++impl<'repo> ::std::fmt::Debug for Object<'repo> {
++    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
++        let mut ds = f.debug_struct("Object");
++        match self.kind() {
++            Some(kind) => ds.field("kind", &kind),
++            None => ds.field("kind", &format!("Unknown ({})", unsafe { raw::git_object_type(&*self.raw) }))
++        };
++        ds.field("id", &self.id());
++        ds.finish()
++    }
++}
++
++impl<'repo> Binding for Object<'repo> {
++    type Raw = *mut raw::git_object;
++
++    unsafe fn from_raw(raw: *mut raw::git_object) -> Object<'repo> {
++        Object { raw: raw, _marker: marker::PhantomData, }
++    }
++    fn raw(&self) -> *mut raw::git_object { self.raw }
++}
++
++impl<'repo> Drop for Object<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_object_free(self.raw) }
++    }
++}
diff --cc vendor/git2-0.7.5/src/odb.rs
index 000000000,000000000..c3a07a474
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/odb.rs
@@@ -1,0 -1,0 +1,419 @@@
++use std::marker;
++use std::io;
++use std::ptr;
++use std::slice;
++
++use std::ffi::CString;
++
++use libc::{c_char, c_int, c_void, size_t};
++
++use {raw, Oid, Object, ObjectType, Error};
++use panic;
++use util::Binding;
++
++/// A structure to represent a git object database
++pub struct Odb<'repo> {
++    raw: *mut raw::git_odb,
++    _marker: marker::PhantomData<Object<'repo>>,
++}
++
++impl<'repo> Binding for Odb<'repo> {
++    type Raw = *mut raw::git_odb;
++
++    unsafe fn from_raw(raw: *mut raw::git_odb) -> Odb<'repo> {
++        Odb {
++            raw: raw,
++            _marker: marker::PhantomData,
++        }
++    }
++    fn raw(&self) -> *mut raw::git_odb { self.raw }
++}
++
++impl<'repo> Drop for Odb<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_odb_free(self.raw) }
++    }
++}
++
++impl<'repo> Odb<'repo> {
++    /// Creates an object database without any backends.
++    pub fn new<'a>() -> Result<Odb<'a>, Error> {
++        unsafe {
++            let mut out = ptr::null_mut();
++            try_call!(raw::git_odb_new(&mut out));
++            Ok(Odb::from_raw(out))
++        }
++    }
++
++    /// Create an object database reading stream.
++    ///
++    /// Note that most backends do not support streaming reads because they
++    /// store their objects as compressed/delta'ed blobs. If the backend does
++    /// not support streaming reads, use the `read` method instead.
++    pub fn reader(&self, oid: Oid) -> Result<(OdbReader, usize, ObjectType), Error> {
++        let mut out = ptr::null_mut();
++        let mut size = 0usize;
++        let mut otype: raw::git_otype = ObjectType::Any.raw();
++        unsafe {
++            try_call!(raw::git_odb_open_rstream(&mut out, &mut size, &mut otype,
++                                                self.raw, oid.raw()));
++            Ok((OdbReader::from_raw(out), size, ObjectType::from_raw(otype).unwrap()))
++        }
++    }
++
++    /// Create an object database writing stream.
++    ///
++    /// The type and final length of the object must be specified when opening
++    /// the stream. If the backend does not support streaming writes, use the
++    /// `write` method instead.
++    pub fn writer(&self, size: usize, obj_type: ObjectType) -> Result<OdbWriter, Error> {
++        let mut out = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_odb_open_wstream(&mut out, self.raw,
++                                                size as raw::git_off_t,
++                                                obj_type.raw()));
++            Ok(OdbWriter::from_raw(out))
++        }
++    }
++
++    /// Iterate over all objects in the object database.
++    pub fn foreach<C>(&self, mut callback: C) -> Result<(), Error>
++        where C: FnMut(&Oid) -> bool
++    {
++        unsafe {
++            let mut data = ForeachCbData { callback: &mut callback };
++            try_call!(raw::git_odb_foreach(self.raw(),
++                                           foreach_cb,
++                                           &mut data as *mut _ as *mut _));
++            Ok(())
++        }
++    }
++
++    /// Read an object from the database.
++    pub fn read(&self, oid: Oid) -> Result<OdbObject, Error> {
++        let mut out = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_odb_read(&mut out, self.raw, oid.raw()));
++            Ok(OdbObject::from_raw(out))
++        }
++    }
++
++    /// Reads the header of an object from the database
++    /// without reading the full content.
++    pub fn read_header(&self, oid: Oid) -> Result<(usize, ObjectType), Error> {
++        let mut size: usize = 0;
++        let mut kind_id: i32 = ObjectType::Any.raw();
++
++        unsafe {
++            try_call!(raw::git_odb_read_header(&mut size as *mut size_t,
++                                               &mut kind_id as *mut raw::git_otype,
++                                               self.raw,
++                                               oid.raw()));
++
++            Ok((size, ObjectType::from_raw(kind_id).unwrap()))
++        }
++    }
++
++    /// Write an object to the database.
++    pub fn write(&self, kind: ObjectType, data: &[u8]) -> Result<Oid, Error> {
++        unsafe {
++            let mut out = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++            try_call!(raw::git_odb_write(&mut out,
++                                         self.raw,
++                                         data.as_ptr() as *const c_void,
++                                         data.len(),
++                                         kind.raw()));
++            Ok(Oid::from_raw(&mut out))
++        }
++    }
++
++    /// Checks if the object database has an object.
++    pub fn exists(&self, oid: Oid) -> bool {
++        // `git_odb_exists` returns 1 if the object was found and 0 otherwise.
++        unsafe { raw::git_odb_exists(self.raw, oid.raw()) != 0 }
++    }
++
++    /// Potentially finds an object that starts with the given prefix.
++    pub fn exists_prefix(&self, short_oid: Oid, len: usize) -> Result<Oid, Error> {
++        unsafe {
++            let mut out = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++            try_call!(raw::git_odb_exists_prefix(&mut out, self.raw,
++                                                 short_oid.raw(), len));
++            Ok(Oid::from_raw(&out))
++        }
++    }
++
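++    // [Editorial sketch] A full round trip through the database, mirroring
++    // the tests at the bottom of this file (assumes `repo: Repository`):
++    //
++    //     let odb = repo.odb()?;
++    //     let id = odb.write(ObjectType::Blob, b"hello")?;
++    //     assert!(odb.exists(id));
++    //     let obj = odb.read(id)?;
++    //     assert_eq!(obj.data(), b"hello");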
++    /// Refresh the object database.
++    ///
++    /// This should never be needed and is provided purely for convenience.
++    /// The object database will automatically refresh when an object is not
++    /// found when requested.
++    pub fn refresh(&self) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_odb_refresh(self.raw));
++            Ok(())
++        }
++    }
++
++    /// Adds an alternate disk backend to the object database.
++    pub fn add_disk_alternate(&self, path: &str) -> Result<(), Error> {
++        unsafe {
++            let path = try!(CString::new(path));
++            try_call!(raw::git_odb_add_disk_alternate(self.raw, path));
++            Ok(())
++        }
++    }
++}
++
++/// An object from the Object Database.
++pub struct OdbObject<'a> {
++    raw: *mut raw::git_odb_object,
++    _marker: marker::PhantomData<Object<'a>>,
++}
++
++impl<'a> Binding for OdbObject<'a> {
++    type Raw = *mut raw::git_odb_object;
++
++    unsafe fn from_raw(raw: *mut raw::git_odb_object) -> OdbObject<'a> {
++        OdbObject {
++            raw: raw,
++            _marker: marker::PhantomData,
++        }
++    }
++
++    fn raw(&self) -> *mut raw::git_odb_object { self.raw }
++}
++
++impl<'a> Drop for OdbObject<'a> {
++    fn drop(&mut self) {
++        unsafe { raw::git_odb_object_free(self.raw) }
++    }
++}
++
++impl<'a> OdbObject<'a> {
++    /// Get the object type.
++    pub fn kind(&self) -> ObjectType {
++        unsafe { ObjectType::from_raw(raw::git_odb_object_type(self.raw)).unwrap() }
++    }
++
++    /// Get the object size.
++    pub fn len(&self) -> usize {
++        unsafe { raw::git_odb_object_size(self.raw) }
++    }
++
++    /// Get the object data.
++    pub fn data(&self) -> &[u8] {
++        unsafe {
++            let size = self.len();
++            let ptr: *const u8 = raw::git_odb_object_data(self.raw) as *const u8;
++            slice::from_raw_parts(ptr, size)
++        }
++    }
++
++    /// Get the object id.
++    pub fn id(&self) -> Oid {
++        unsafe { Oid::from_raw(raw::git_odb_object_id(self.raw)) }
++    }
++}
++
++/// A structure to represent a git ODB rstream
++pub struct OdbReader<'repo> {
++    raw: *mut raw::git_odb_stream,
++    _marker: marker::PhantomData<Object<'repo>>,
++}
++
++impl<'repo> Binding for OdbReader<'repo> {
++    type Raw = *mut raw::git_odb_stream;
++
++    unsafe fn from_raw(raw: *mut raw::git_odb_stream) -> OdbReader<'repo> {
++        OdbReader {
++            raw: raw,
++            _marker: marker::PhantomData,
++        }
++    }
++    fn raw(&self) -> *mut raw::git_odb_stream { self.raw }
++}
++
++impl<'repo> Drop for OdbReader<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_odb_stream_free(self.raw) }
++    }
++}
++
++impl<'repo> io::Read for OdbReader<'repo> {
++    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
++        unsafe {
++            let ptr = buf.as_ptr() as *mut c_char;
++            let len = buf.len();
++            let res = raw::git_odb_stream_read(self.raw, ptr, len);
++            if res < 0 {
++                Err(io::Error::new(io::ErrorKind::Other, "Read error"))
++            } else {
++                // `git_odb_stream_read` returns the number of bytes actually
++                // read, which may be less than the buffer size.
++                Ok(res as usize)
++            }
++        }
++    }
++}
++
++/// A structure to represent a git ODB wstream
++pub struct OdbWriter<'repo> {
++    raw: *mut raw::git_odb_stream,
++    _marker: marker::PhantomData<Object<'repo>>,
++}
++
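++// [Editorial sketch] Streaming an object in chunks, mirroring the `writer`
++// test below: the size is declared up front, data is written through the
++// `io::Write` impl, and `finalize` yields the object id.
++//
++//     use std::io::Write;
++//     let mut ws = odb.writer(5, ObjectType::Blob)?;
++//     ws.write_all(b"he")?;
++//     ws.write_all(b"llo")?;
++//     let id = ws.finalize()?;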
++impl<'repo> OdbWriter<'repo> {
++    /// Finish writing to an ODB stream.
++    ///
++    /// This method can be used to finalize writing an object to the database
++    /// and get an identifier. The object will take its final name and will be
++    /// available to the odb. This method will fail if the total number of
++    /// received bytes differs from the size declared when the stream was
++    /// opened with `writer()`. Attempting to write after finishing will be
++    /// ignored.
++    pub fn finalize(&mut self) -> Result<Oid, Error> {
++        let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        unsafe {
++            try_call!(raw::git_odb_stream_finalize_write(&mut raw, self.raw));
++            Ok(Binding::from_raw(&raw as *const _))
++        }
++    }
++}
++
++impl<'repo> Binding for OdbWriter<'repo> {
++    type Raw = *mut raw::git_odb_stream;
++
++    unsafe fn from_raw(raw: *mut raw::git_odb_stream) -> OdbWriter<'repo> {
++        OdbWriter {
++            raw: raw,
++            _marker: marker::PhantomData,
++        }
++    }
++    fn raw(&self) -> *mut raw::git_odb_stream { self.raw }
++}
++
++impl<'repo> Drop for OdbWriter<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_odb_stream_free(self.raw) }
++    }
++}
++
++impl<'repo> io::Write for OdbWriter<'repo> {
++    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
++        unsafe {
++            let ptr = buf.as_ptr() as *const c_char;
++            let len = buf.len();
++            let res = raw::git_odb_stream_write(self.raw, ptr, len);
++            if res < 0 {
++                Err(io::Error::new(io::ErrorKind::Other, "Write error"))
++            } else {
++                Ok(buf.len())
++            }
++        }
++    }
++    fn flush(&mut self) -> io::Result<()> { Ok(()) }
++}
++
++pub type ForeachCb<'a> = FnMut(&Oid) -> bool + 'a;
++
++struct ForeachCbData<'a> {
++    pub callback: &'a mut ForeachCb<'a>
++}
++
++extern fn foreach_cb(id: *const raw::git_oid,
++                     payload: *mut c_void)
++                     -> c_int
++{
++    panic::wrap(|| unsafe {
++        let data = &mut *(payload as *mut ForeachCbData);
++        let res = {
++            let callback = &mut data.callback;
++            callback(&Binding::from_raw(id))
++        };
++
++        if res { 0 } else { 1 }
++    }).unwrap_or(1)
++}
++
++#[cfg(test)]
++mod tests {
++    use std::io::prelude::*;
++    use tempdir::TempDir;
++    use {Repository, ObjectType, Oid};
++
++    #[test]
++    fn read() {
++        let td = TempDir::new("test").unwrap();
++        let repo = Repository::init(td.path()).unwrap();
++        let dat = [4, 3, 5, 6, 9];
++        let id = repo.blob(&dat).unwrap();
++        let db = repo.odb().unwrap();
++        let obj = db.read(id).unwrap();
++        let data = obj.data();
++        let size = obj.len();
++        assert_eq!(size, 5);
++        assert_eq!(dat, data);
++        assert_eq!(id, obj.id());
++    }
++
++    #[test]
++    fn read_header() {
++        let td = TempDir::new("test").unwrap();
++        let repo = Repository::init(td.path()).unwrap();
++        let dat = [4, 3, 5, 6, 9];
++        let id = repo.blob(&dat).unwrap();
++        let db = repo.odb().unwrap();
++        let (size, kind) = db.read_header(id).unwrap();
++
++        assert_eq!(size, 5);
++        assert_eq!(kind, ObjectType::Blob);
++    }
++
++    #[test]
++    fn write() {
++        let td = TempDir::new("test").unwrap();
++        let repo = Repository::init(td.path()).unwrap();
++        let dat = [4, 3, 5, 6, 9];
++        let db = repo.odb().unwrap();
++        let id = db.write(ObjectType::Blob, &dat).unwrap();
++        let blob = repo.find_blob(id).unwrap();
++        assert_eq!(blob.content(), dat);
++    }
++
++    #[test]
++    fn writer() {
++        let td = TempDir::new("test").unwrap();
++        let repo = Repository::init(td.path()).unwrap();
++        let dat = [4, 3, 5, 6, 9];
++        let db = repo.odb().unwrap();
++        let mut ws = db.writer(dat.len(), ObjectType::Blob).unwrap();
++        let wl = ws.write(&dat[0..3]).unwrap();
++        assert_eq!(wl, 3);
++        let wl = ws.write(&dat[3..5]).unwrap();
++        assert_eq!(wl, 2);
++        let id = ws.finalize().unwrap();
++        let blob = repo.find_blob(id).unwrap();
++        assert_eq!(blob.content(), dat);
++    }
++
++    #[test]
++    fn exists() {
++        let td = TempDir::new("test").unwrap();
++        let repo = Repository::init(td.path()).unwrap();
++        let dat = [4, 3, 5, 6, 9];
++        let db = repo.odb().unwrap();
++        let id = db.write(ObjectType::Blob, &dat).unwrap();
++        assert!(db.exists(id));
++    }
++
++    #[test]
++    fn exists_prefix() {
++        let td = TempDir::new("test").unwrap();
++        let repo = Repository::init(td.path()).unwrap();
++        let dat = [4, 3, 5, 6, 9];
++        let db = repo.odb().unwrap();
++        let id = db.write(ObjectType::Blob, &dat).unwrap();
++        let id_prefix_str = &id.to_string()[0..10];
++        let id_prefix = Oid::from_str(id_prefix_str).unwrap();
++        let found_oid = db.exists_prefix(id_prefix, 10).unwrap();
++        assert_eq!(found_oid, id);
++    }
++}
diff --cc vendor/git2-0.7.5/src/oid.rs
index 000000000,000000000..b63fe3451
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/oid.rs
@@@ -1,0 -1,0 +1,214 @@@
++use std::fmt;
++use std::cmp::Ordering;
++use std::hash::{Hasher, Hash};
++use std::str;
++use std::path::Path;
++use libc;
++
++use {raw, Error, ObjectType, IntoCString};
++
++use util::Binding;
++
++/// Unique identity of any object (commit, tree, blob, tag).
++#[derive(Copy, Clone)]
++pub struct Oid {
++    raw: raw::git_oid
++}
++
++impl Oid {
++    /// Parse a hex-formatted object id into an Oid structure.
++    ///
++    /// # Errors
++    ///
++    /// Returns an error if the string is empty, is longer than 40 hex
++    /// characters, or contains any non-hex characters.
++    pub fn from_str(s: &str) -> Result<Oid, Error> {
++        ::init();
++        let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        unsafe {
++            try_call!(raw::git_oid_fromstrn(&mut raw,
++                                            s.as_bytes().as_ptr()
++                                                as *const libc::c_char,
++                                            s.len() as libc::size_t));
++        }
++        Ok(Oid { raw: raw })
++    }
++
++    /// Parse a raw object id into an Oid structure.
++    ///
++    /// If the array given is not 20 bytes in length, an error is returned.
++    pub fn from_bytes(bytes: &[u8]) -> Result<Oid, Error> {
++        ::init();
++        let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        if bytes.len() != raw::GIT_OID_RAWSZ {
++            Err(Error::from_str("raw byte array must be 20 bytes"))
++        } else {
++            unsafe { raw::git_oid_fromraw(&mut raw, bytes.as_ptr()) }
++            Ok(Oid { raw: raw })
++        }
++    }
++
++    /// Creates an all zero Oid structure.
++    pub fn zero() -> Oid {
++        let out = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        Oid { raw: out }
++    }
++
++    /// Hashes the provided data as an object of the provided type, and returns
++    /// an Oid corresponding to the result. This does not store the object
++    /// inside any object database or repository.
++    pub fn hash_object(kind: ObjectType, bytes: &[u8]) -> Result<Oid, Error> {
++        ::init();
++
++        let mut out = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        unsafe {
++            try_call!(raw::git_odb_hash(&mut out,
++                                        bytes.as_ptr() as *const libc::c_void,
++                                        bytes.len(),
++                                        kind.raw()));
++        }
++
++        Ok(Oid { raw: out })
++    }
++
++    /// Hashes the content of the provided file as an object of the provided
++    /// type, and returns an Oid corresponding to the result. This does not
++    /// store the object inside any object database or repository.
++    pub fn hash_file<P: AsRef<Path>>(kind: ObjectType, path: P) -> Result<Oid, Error> {
++        ::init();
++
++        let rpath = try!(path.as_ref().into_c_string());
++
++        let mut out = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        unsafe {
++            try_call!(raw::git_odb_hashfile(&mut out, rpath, kind.raw()));
++        }
++
++        Ok(Oid { raw: out })
++    }
++
++    /// View this OID as a byte-slice 20 bytes in length.
++    pub fn as_bytes(&self) -> &[u8] { &self.raw.id }
++
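++    // [Editorial sketch] Common constructions, mirroring the tests below:
++    //
++    //     let a = Oid::from_str("decbf2be529ab6557d5429922251e5ee36519817")?;
++    //     let b = Oid::hash_object(ObjectType::Blob, b"Hello")?;
++    //     assert_eq!(a.as_bytes().len(), 20);
++    //     assert!(Oid::zero().is_zero());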
++    /// Test if this OID is all zeros.
++    pub fn is_zero(&self) -> bool {
++        unsafe { raw::git_oid_iszero(&self.raw) == 1 }
++    }
++}
++
++impl Binding for Oid {
++    type Raw = *const raw::git_oid;
++
++    unsafe fn from_raw(oid: *const raw::git_oid) -> Oid {
++        Oid { raw: *oid }
++    }
++    fn raw(&self) -> *const raw::git_oid { &self.raw as *const _ }
++}
++
++impl fmt::Debug for Oid {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        fmt::Display::fmt(self, f)
++    }
++}
++
++impl fmt::Display for Oid {
++    /// Hex-encode this Oid into a formatter.
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        let mut dst = [0u8; raw::GIT_OID_HEXSZ + 1];
++        unsafe {
++            raw::git_oid_tostr(dst.as_mut_ptr() as *mut libc::c_char,
++                               dst.len() as libc::size_t, &self.raw);
++        }
++        let s = &dst[..dst.iter().position(|&a| a == 0).unwrap()];
++        str::from_utf8(s).unwrap().fmt(f)
++    }
++}
++
++impl str::FromStr for Oid {
++    type Err = Error;
++
++    /// Parse a hex-formatted object id into an Oid structure.
++    ///
++    /// # Errors
++    ///
++    /// Returns an error if the string is empty, is longer than 40 hex
++    /// characters, or contains any non-hex characters.
++    fn from_str(s: &str) -> Result<Oid, Error> {
++        Oid::from_str(s)
++    }
++}
++
++impl PartialEq for Oid {
++    fn eq(&self, other: &Oid) -> bool {
++        unsafe { raw::git_oid_equal(&self.raw, &other.raw) != 0 }
++    }
++}
++impl Eq for Oid {}
++
++impl PartialOrd for Oid {
++    fn partial_cmp(&self, other: &Oid) -> Option<Ordering> {
++        Some(self.cmp(other))
++    }
++}
++
++impl Ord for Oid {
++    fn cmp(&self, other: &Oid) -> Ordering {
++        match unsafe { raw::git_oid_cmp(&self.raw, &other.raw) } {
++            0 => Ordering::Equal,
++            n if n < 0 => Ordering::Less,
++            _ => Ordering::Greater,
++        }
++    }
++}
++
++impl Hash for Oid {
++    fn hash<H: Hasher>(&self, into: &mut H) {
++        self.raw.id.hash(into)
++    }
++}
++
++impl AsRef<[u8]> for Oid {
++    fn as_ref(&self) -> &[u8] { self.as_bytes() }
++}
++
++#[cfg(test)]
++mod tests {
++    use std::io::prelude::*;
++    use std::fs::File;
++
++    use tempdir::TempDir;
++    use {ObjectType};
++    use super::Oid;
++
++    #[test]
++    fn conversions() {
++        assert!(Oid::from_str("foo").is_err());
++        assert!(Oid::from_str("decbf2be529ab6557d5429922251e5ee36519817").is_ok());
++        assert!(Oid::from_bytes(b"foo").is_err());
++        assert!(Oid::from_bytes(b"00000000000000000000").is_ok());
++    }
++
++    #[test]
++    fn zero_is_zero() {
++        assert!(Oid::zero().is_zero());
++    }
++
++    #[test]
++    fn hash_object() {
++        let bytes = "Hello".as_bytes();
++        assert!(Oid::hash_object(ObjectType::Blob, bytes).is_ok());
++    }
++
++    #[test]
++    fn hash_file() {
++        let td = TempDir::new("test").unwrap();
++        let path = td.path().join("hello.txt");
++        let mut file = File::create(&path).unwrap();
++        file.write_all("Hello".as_bytes()).unwrap();
++        assert!(Oid::hash_file(ObjectType::Blob, &path).is_ok());
++    }
++}
++
diff --cc vendor/git2-0.7.5/src/oid_array.rs
index 000000000,000000000..d1108e6c5
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/oid_array.rs
@@@ -1,0 -1,0 +1,50 @@@
++//! Bindings to libgit2's raw `git_oidarray` type
++
++use std::ops::Deref;
++
++use oid::Oid;
++use raw;
++use util::Binding;
++use std::slice;
++use std::mem;
++
++/// An oid array structure used by libgit2
++///
++/// Some APIs return arrays of Oids which originate from libgit2. This
++/// wrapper type behaves a little like `Vec<&Oid>` but does so without copying
++/// the underlying Oids until necessary.
++pub struct OidArray { ++ raw: raw::git_oidarray, ++} ++ ++impl Deref for OidArray { ++ type Target = [Oid]; ++ ++ fn deref(&self) -> &[Oid] { ++ unsafe { ++ debug_assert_eq!(mem::size_of::(), mem::size_of_val(&*self.raw.ids)); ++ ++ slice::from_raw_parts(self.raw.ids as *const Oid, self.raw.count as usize) ++ } ++ } ++} ++ ++impl Binding for OidArray { ++ type Raw = raw::git_oidarray; ++ unsafe fn from_raw(raw: raw::git_oidarray) -> OidArray { ++ OidArray { raw: raw } ++ } ++ fn raw(&self) -> raw::git_oidarray { self.raw } ++} ++ ++impl<'repo> ::std::fmt::Debug for OidArray { ++ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { ++ f.debug_tuple("OidArray").field(&self.deref()).finish() ++ } ++} ++ ++impl Drop for OidArray { ++ fn drop(&mut self) { ++ unsafe { raw::git_oidarray_free(&mut self.raw) } ++ } ++} diff --cc vendor/git2-0.7.5/src/packbuilder.rs index 000000000,000000000..e3ed5132b new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/packbuilder.rs @@@ -1,0 -1,0 +1,386 @@@ ++use std::marker; ++use std::ptr; ++use std::slice; ++use libc::{c_int, c_uint, c_void, size_t}; ++ ++use {raw, panic, Repository, Error, Oid, Revwalk, Buf}; ++use util::Binding; ++ ++/// Stages that are reported by the `PackBuilder` progress callback. ++pub enum PackBuilderStage { ++ /// Adding objects to the pack ++ AddingObjects, ++ /// Deltafication of the pack ++ Deltafication, ++} ++ ++pub type ProgressCb<'a> = FnMut(PackBuilderStage, u32, u32) -> bool + 'a; ++pub type ForEachCb<'a> = FnMut(&[u8]) -> bool + 'a; ++ ++/// A builder for creating a packfile ++pub struct PackBuilder<'repo> { ++ raw: *mut raw::git_packbuilder, ++ progress: Option>>>, ++ _marker: marker::PhantomData<&'repo Repository>, ++} ++ ++impl<'repo> PackBuilder<'repo> { ++ /// Insert a single object. For an optimal pack it's mandatory to insert ++ /// objects in recency order, commits followed by trees and blobs. ++ pub fn insert_object(&mut self, id: Oid, name: Option<&str>) ++ -> Result<(), Error> { ++ let name = try!(::opt_cstr(name)); ++ unsafe { ++ try_call!(raw::git_packbuilder_insert(self.raw, id.raw(), name)); ++ } ++ Ok(()) ++ } ++ ++ /// Insert a root tree object. This will add the tree as well as all ++ /// referenced trees and blobs. ++ pub fn insert_tree(&mut self, id: Oid) -> Result<(), Error> { ++ unsafe { ++ try_call!(raw::git_packbuilder_insert_tree(self.raw, id.raw())); ++ } ++ Ok(()) ++ } ++ ++ /// Insert a commit object. This will add a commit as well as the completed ++ /// referenced tree. ++ pub fn insert_commit(&mut self, id: Oid) -> Result<(), Error> { ++ unsafe { ++ try_call!(raw::git_packbuilder_insert_commit(self.raw, id.raw())); ++ } ++ Ok(()) ++ } ++ ++ /// Insert objects as given by the walk. Those commits and all objects they ++ /// reference will be inserted into the packbuilder. ++ pub fn insert_walk(&mut self, walk: &mut Revwalk) -> Result<(), Error> { ++ unsafe { ++ try_call!(raw::git_packbuilder_insert_walk(self.raw, walk.raw())); ++ } ++ Ok(()) ++ } ++ ++ /// Recursively insert an object and its referenced objects. Insert the ++ /// object as well as any object it references. ++ pub fn insert_recursive(&mut self, id: Oid, name: Option<&str>) ++ -> Result<(), Error> { ++ let name = try!(::opt_cstr(name)); ++ unsafe { ++ try_call!(raw::git_packbuilder_insert_recur(self.raw, ++ id.raw(), ++ name)); ++ } ++ Ok(()) ++ } ++ ++ /// Write the contents of the packfile to an in-memory buffer. 
The contents ++ /// of the buffer will become a valid packfile, even though there will be ++ /// no attached index. ++ pub fn write_buf(&mut self, buf: &mut Buf) -> Result<(), Error> { ++ unsafe { ++ try_call!(raw::git_packbuilder_write_buf(buf.raw(), self.raw)); ++ } ++ Ok(()) ++ } ++ ++ /// Create the new pack and pass each object to the callback. ++ pub fn foreach(&mut self, mut cb: F) -> Result<(), Error> ++ where F: FnMut(&[u8]) -> bool ++ { ++ let mut cb = &mut cb as &mut ForEachCb; ++ let ptr = &mut cb as *mut _; ++ unsafe { ++ try_call!(raw::git_packbuilder_foreach(self.raw, ++ foreach_c, ++ ptr as *mut _)); ++ } ++ Ok(()) ++ } ++ ++ /// `progress` will be called with progress information during pack ++ /// building. Be aware that this is called inline with pack building ++ /// operations, so performance may be affected. ++ /// ++ /// There can only be one progress callback attached, this will replace any ++ /// existing one. See `unset_progress_callback` to remove the current ++ /// progress callback without attaching a new one. ++ pub fn set_progress_callback(&mut self, progress: F) -> Result<(), Error> ++ where F: FnMut(PackBuilderStage, u32, u32) -> bool + 'repo ++ { ++ let mut progress = Box::new(Box::new(progress) as Box); ++ let ptr = &mut *progress as *mut _; ++ let progress_c = Some(progress_c as raw::git_packbuilder_progress); ++ unsafe { ++ try_call!(raw::git_packbuilder_set_callbacks(self.raw, ++ progress_c, ++ ptr as *mut _)); ++ } ++ self.progress = Some(progress); ++ Ok(()) ++ } ++ ++ /// Remove the current progress callback. See `set_progress_callback` to ++ /// set the progress callback. ++ pub fn unset_progress_callback(&mut self) -> Result<(), Error> { ++ unsafe { ++ try_call!(raw::git_packbuilder_set_callbacks(self.raw, ++ None, ++ ptr::null_mut())); ++ self.progress = None; ++ } ++ Ok(()) ++ } ++ ++ /// Get the total number of objects the packbuilder will write out. ++ pub fn object_count(&self) -> usize { ++ unsafe { raw::git_packbuilder_object_count(self.raw) } ++ } ++ ++ /// Get the number of objects the packbuilder has already written out. ++ pub fn written(&self) -> usize { ++ unsafe { raw::git_packbuilder_written(self.raw) } ++ } ++ ++ /// Get the packfile's hash. A packfile's name is derived from the sorted ++ /// hashing of all object names. This is only correct after the packfile ++ /// has been written. 
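++ ///
++ /// # Examples
++ ///
++ /// A minimal sketch, assuming an already-open repository; the hash is
++ /// meaningful only after the pack has been written:
++ ///
++ /// ```no_run
++ /// fn pack_hash(repo: &git2::Repository) -> Result<(), git2::Error> {
++ ///     let mut builder = repo.packbuilder()?;
++ ///     let mut buf = git2::Buf::new();
++ ///     builder.write_buf(&mut buf)?;
++ ///     if let Some(hash) = builder.hash() {
++ ///         println!("pack hash: {}", hash);
++ ///     }
++ ///     Ok(())
++ /// }
++ /// ```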
++ pub fn hash(&self) -> Option { ++ if self.object_count() == 0 { ++ unsafe { ++ Some(Binding::from_raw(raw::git_packbuilder_hash(self.raw))) ++ } ++ } else { ++ None ++ } ++ } ++} ++ ++impl<'repo> Binding for PackBuilder<'repo> { ++ type Raw = *mut raw::git_packbuilder; ++ unsafe fn from_raw(ptr: *mut raw::git_packbuilder) -> PackBuilder<'repo> { ++ PackBuilder { ++ raw: ptr, ++ progress: None, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *mut raw::git_packbuilder { ++ self.raw ++ } ++} ++ ++impl<'repo> Drop for PackBuilder<'repo> { ++ fn drop(&mut self) { ++ unsafe { ++ raw::git_packbuilder_set_callbacks(self.raw, None, ptr::null_mut()); ++ raw::git_packbuilder_free(self.raw); ++ } ++ } ++} ++ ++impl Binding for PackBuilderStage { ++ type Raw = raw::git_packbuilder_stage_t; ++ unsafe fn from_raw(raw: raw::git_packbuilder_stage_t) -> PackBuilderStage { ++ match raw { ++ raw::GIT_PACKBUILDER_ADDING_OBJECTS => PackBuilderStage::AddingObjects, ++ raw::GIT_PACKBUILDER_DELTAFICATION => PackBuilderStage::Deltafication, ++ _ => panic!("Unknown git diff binary kind"), ++ } ++ } ++ fn raw(&self) -> raw::git_packbuilder_stage_t { ++ match *self { ++ PackBuilderStage::AddingObjects => raw::GIT_PACKBUILDER_ADDING_OBJECTS, ++ PackBuilderStage::Deltafication => raw::GIT_PACKBUILDER_DELTAFICATION, ++ } ++ } ++} ++ ++extern fn foreach_c(buf: *const c_void, ++ size: size_t, ++ data: *mut c_void) ++ -> c_int { ++ unsafe { ++ let buf = slice::from_raw_parts(buf as *const u8, size as usize); ++ ++ let r = panic::wrap(|| { ++ let data = data as *mut &mut ForEachCb; ++ (*data)(buf) ++ }); ++ if r == Some(true) { ++ 0 ++ } else { ++ -1 ++ } ++ } ++} ++ ++extern fn progress_c(stage: raw::git_packbuilder_stage_t, ++ current: c_uint, ++ total: c_uint, ++ data: *mut c_void) ++ -> c_int { ++ unsafe { ++ let stage = Binding::from_raw(stage); ++ ++ let r = panic::wrap(|| { ++ let data = data as *mut Box; ++ (*data)(stage, current, total) ++ }); ++ if r == Some(true) { ++ 0 ++ } else { ++ -1 ++ } ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::fs::File; ++ use std::path::Path; ++ use {Buf, Repository, Oid}; ++ ++ fn commit(repo: &Repository) -> (Oid, Oid) { ++ let mut index = t!(repo.index()); ++ let root = repo.path().parent().unwrap(); ++ t!(File::create(&root.join("foo"))); ++ t!(index.add_path(Path::new("foo"))); ++ ++ let tree_id = t!(index.write_tree()); ++ let tree = t!(repo.find_tree(tree_id)); ++ let sig = t!(repo.signature()); ++ let head_id = t!(repo.refname_to_id("HEAD")); ++ let parent = t!(repo.find_commit(head_id)); ++ let commit = t!(repo.commit(Some("HEAD"), ++ &sig, ++ &sig, ++ "commit", ++ &tree, ++ &[&parent])); ++ (commit, tree_id) ++ } ++ ++ fn pack_header(len: u8) -> Vec { ++ [].into_iter() ++ .chain(b"PACK") // signature ++ .chain(&[0, 0, 0, 2]) // version number ++ .chain(&[0, 0, 0, len]) // number of objects ++ .cloned().collect::>() ++ } ++ ++ fn empty_pack_header() -> Vec { ++ pack_header(0).iter() ++ .chain(&[0x02, 0x9d, 0x08, 0x82, 0x3b, // ^ ++ 0xd8, 0xa8, 0xea, 0xb5, 0x10, // | SHA-1 of the zero ++ 0xad, 0x6a, 0xc7, 0x5c, 0x82, // | object pack header ++ 0x3c, 0xfd, 0x3e, 0xd3, 0x1e]) // v ++ .cloned().collect::>() ++ } ++ ++ #[test] ++ fn smoke() { ++ let (_td, repo) = ::test::repo_init(); ++ let _builder = t!(repo.packbuilder()); ++ } ++ ++ #[test] ++ fn smoke_write_buf() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut builder = t!(repo.packbuilder()); ++ let mut buf = Buf::new(); ++ t!(builder.write_buf(&mut buf)); ++ 
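++ // An empty pack is just the 12-byte header plus its trailing
++ // SHA-1; `empty_pack_header` reconstructs those bytes exactly.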
assert!(builder.hash().unwrap().is_zero()); ++ assert_eq!(&*buf, &*empty_pack_header()); ++ } ++ ++ #[test] ++ fn smoke_foreach() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut builder = t!(repo.packbuilder()); ++ let mut buf = Vec::::new(); ++ t!(builder.foreach(|bytes| { ++ buf.extend(bytes); ++ true ++ })); ++ assert_eq!(&*buf, &*empty_pack_header()); ++ } ++ ++ #[test] ++ fn insert_write_buf() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut builder = t!(repo.packbuilder()); ++ let mut buf = Buf::new(); ++ let (commit, _tree) = commit(&repo); ++ t!(builder.insert_object(commit, None)); ++ assert_eq!(builder.object_count(), 1); ++ t!(builder.write_buf(&mut buf)); ++ // Just check that the correct number of objects are written ++ assert_eq!(&buf[0..12], &*pack_header(1)); ++ } ++ ++ #[test] ++ fn insert_tree_write_buf() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut builder = t!(repo.packbuilder()); ++ let mut buf = Buf::new(); ++ let (_commit, tree) = commit(&repo); ++ // will insert the tree itself and the blob, 2 objects ++ t!(builder.insert_tree(tree)); ++ assert_eq!(builder.object_count(), 2); ++ t!(builder.write_buf(&mut buf)); ++ // Just check that the correct number of objects are written ++ assert_eq!(&buf[0..12], &*pack_header(2)); ++ } ++ ++ #[test] ++ fn insert_commit_write_buf() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut builder = t!(repo.packbuilder()); ++ let mut buf = Buf::new(); ++ let (commit, _tree) = commit(&repo); ++ // will insert the commit, its tree and the blob, 3 objects ++ t!(builder.insert_commit(commit)); ++ assert_eq!(builder.object_count(), 3); ++ t!(builder.write_buf(&mut buf)); ++ // Just check that the correct number of objects are written ++ assert_eq!(&buf[0..12], &*pack_header(3)); ++ } ++ ++ #[test] ++ fn progress_callback() { ++ let mut progress_called = false; ++ { ++ let (_td, repo) = ::test::repo_init(); ++ let mut builder = t!(repo.packbuilder()); ++ let (commit, _tree) = commit(&repo); ++ t!(builder.set_progress_callback(|_, _, _| { ++ progress_called = true; ++ true ++ })); ++ t!(builder.insert_commit(commit)); ++ t!(builder.write_buf(&mut Buf::new())); ++ } ++ assert_eq!(progress_called, true); ++ } ++ ++ #[test] ++ fn clear_progress_callback() { ++ let mut progress_called = false; ++ { ++ let (_td, repo) = ::test::repo_init(); ++ let mut builder = t!(repo.packbuilder()); ++ let (commit, _tree) = commit(&repo); ++ t!(builder.set_progress_callback(|_, _, _| { ++ progress_called = true; ++ true ++ })); ++ t!(builder.unset_progress_callback()); ++ t!(builder.insert_commit(commit)); ++ t!(builder.write_buf(&mut Buf::new())); ++ } ++ assert_eq!(progress_called, false); ++ } ++} diff --cc vendor/git2-0.7.5/src/panic.rs index 000000000,000000000..35f2c0939 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/panic.rs @@@ -1,0 -1,0 +1,55 @@@ ++use std::any::Any; ++use std::cell::RefCell; ++ ++thread_local!(static LAST_ERROR: RefCell>> = { ++ RefCell::new(None) ++}); ++ ++#[cfg(feature = "unstable")] ++pub fn wrap T + ::std::panic::UnwindSafe>(f: F) -> Option { ++ use std::panic; ++ if LAST_ERROR.with(|slot| slot.borrow().is_some()) { ++ return None ++ } ++ match panic::catch_unwind(f) { ++ Ok(ret) => Some(ret), ++ Err(e) => { ++ LAST_ERROR.with(move |slot| { ++ *slot.borrow_mut() = Some(e); ++ }); ++ None ++ } ++ } ++} ++ ++#[cfg(not(feature = "unstable"))] ++pub fn wrap T>(f: F) -> Option { ++ struct Bomb { ++ enabled: bool, ++ } ++ impl Drop for Bomb { ++ fn drop(&mut self) { ++ if !self.enabled { ++ 
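++ // Disarmed: the wrapped closure returned normally, so there
++ // is no panic to report.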
return ++ } ++ panic!("callback has panicked, and continuing to unwind into C \ ++ is not safe, so aborting the process"); ++ ++ } ++ } ++ let mut bomb = Bomb { enabled: true }; ++ let ret = Some(f()); ++ bomb.enabled = false; ++ ret ++} ++ ++pub fn check() { ++ let err = LAST_ERROR.with(|slot| slot.borrow_mut().take()); ++ if let Some(err) = err { ++ panic!(err) ++ } ++} ++ ++pub fn panicked() -> bool { ++ LAST_ERROR.with(|slot| slot.borrow().is_some()) ++} diff --cc vendor/git2-0.7.5/src/patch.rs index 000000000,000000000..af7e74ac1 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/patch.rs @@@ -1,0 -1,0 +1,202 @@@ ++use std::path::Path; ++use std::ptr; ++use libc::{c_int, c_void}; ++ ++use {raw, Blob, Buf, Diff, DiffDelta, DiffHunk, DiffLine, DiffOptions, Error}; ++use diff::{LineCb, print_cb}; ++use util::{Binding, into_opt_c_string}; ++ ++/// A structure representing the text changes in a single diff delta. ++/// ++/// This is an opaque structure. ++pub struct Patch { ++ raw: *mut raw::git_patch, ++} ++ ++unsafe impl Send for Patch {} ++ ++impl Binding for Patch { ++ type Raw = *mut raw::git_patch; ++ unsafe fn from_raw(raw: Self::Raw) -> Patch { ++ Patch { raw: raw } ++ } ++ fn raw(&self) -> Self::Raw { self.raw } ++} ++ ++impl Drop for Patch { ++ fn drop(&mut self) { ++ unsafe { raw::git_patch_free(self.raw) } ++ } ++} ++ ++impl Patch { ++ /// Return a Patch for one file in a Diff. ++ /// ++ /// Returns Ok(None) for an unchanged or binary file. ++ pub fn from_diff(diff: &Diff, idx: usize) -> Result, Error> { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_patch_from_diff(&mut ret, diff.raw(), idx)); ++ Ok(Binding::from_raw_opt(ret)) ++ } ++ } ++ ++ /// Generate a Patch by diffing two blobs. ++ pub fn from_blobs(old_blob: &Blob, ++ old_path: Option<&Path>, ++ new_blob: &Blob, ++ new_path: Option<&Path>, ++ opts: Option<&mut DiffOptions>) ++ -> Result ++ { ++ let mut ret = ptr::null_mut(); ++ let old_path = try!(into_opt_c_string(old_path)); ++ let new_path = try!(into_opt_c_string(new_path)); ++ unsafe { ++ try_call!(raw::git_patch_from_blobs(&mut ret, ++ old_blob.raw(), ++ old_path, ++ new_blob.raw(), ++ new_path, ++ opts.map(|s| s.raw()))); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Generate a Patch by diffing a blob and a buffer. ++ pub fn from_blob_and_buffer(old_blob: &Blob, ++ old_path: Option<&Path>, ++ new_buffer: &[u8], ++ new_path: Option<&Path>, ++ opts: Option<&mut DiffOptions>) ++ -> Result ++ { ++ let mut ret = ptr::null_mut(); ++ let old_path = try!(into_opt_c_string(old_path)); ++ let new_path = try!(into_opt_c_string(new_path)); ++ unsafe { ++ try_call!(raw::git_patch_from_blob_and_buffer(&mut ret, ++ old_blob.raw(), ++ old_path, ++ new_buffer.as_ptr() as *const c_void, ++ new_buffer.len(), ++ new_path, ++ opts.map(|s| s.raw()))); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Generate a Patch by diffing two buffers. 
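++ ///
++ /// # Examples
++ ///
++ /// A minimal in-memory sketch; the optional paths and diff options are
++ /// omitted here:
++ ///
++ /// ```
++ /// let patch = git2::Patch::from_buffers(b"hello\n", None,
++ ///                                       b"goodbye\n", None,
++ ///                                       None).unwrap();
++ /// assert!(patch.num_hunks() > 0);
++ /// ```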
++ pub fn from_buffers(old_buffer: &[u8], ++ old_path: Option<&Path>, ++ new_buffer: &[u8], ++ new_path: Option<&Path>, ++ opts: Option<&mut DiffOptions>) ++ -> Result ++ { ++ let mut ret = ptr::null_mut(); ++ let old_path = try!(into_opt_c_string(old_path)); ++ let new_path = try!(into_opt_c_string(new_path)); ++ unsafe { ++ try_call!(raw::git_patch_from_buffers(&mut ret, ++ old_buffer.as_ptr() as *const c_void, ++ old_buffer.len(), ++ old_path, ++ new_buffer.as_ptr() as *const c_void, ++ new_buffer.len(), ++ new_path, ++ opts.map(|s| s.raw()))); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Get the DiffDelta associated with the Patch. ++ pub fn delta(&self) -> DiffDelta { ++ unsafe { ++ Binding::from_raw(raw::git_patch_get_delta(self.raw) as *mut _) ++ } ++ } ++ ++ /// Get the number of hunks in the Patch. ++ pub fn num_hunks(&self) -> usize { ++ unsafe { ++ raw::git_patch_num_hunks(self.raw) ++ } ++ } ++ ++ /// Get the number of lines of context, additions, and deletions in the Patch. ++ pub fn line_stats(&self) -> Result<(usize, usize, usize), Error> { ++ let mut context = 0; ++ let mut additions = 0; ++ let mut deletions = 0; ++ unsafe { ++ try_call!(raw::git_patch_line_stats(&mut context, ++ &mut additions, ++ &mut deletions, ++ self.raw)); ++ } ++ Ok((context, additions, deletions)) ++ } ++ ++ /// Get a DiffHunk and its total line count from the Patch. ++ pub fn hunk(&self, hunk_idx: usize) -> Result<(DiffHunk, usize), Error> { ++ let mut ret = ptr::null(); ++ let mut lines = 0; ++ unsafe { ++ try_call!(raw::git_patch_get_hunk(&mut ret, &mut lines, self.raw, hunk_idx)); ++ Ok((Binding::from_raw(ret), lines)) ++ } ++ } ++ ++ /// Get the number of lines in a hunk. ++ pub fn num_lines_in_hunk(&self, hunk_idx: usize) -> Result { ++ unsafe { ++ Ok(try_call!(raw::git_patch_num_lines_in_hunk(self.raw, hunk_idx)) as usize) ++ } ++ } ++ ++ /// Get a DiffLine from a hunk of the Patch. ++ pub fn line_in_hunk(&self, ++ hunk_idx: usize, ++ line_of_hunk: usize) -> Result { ++ let mut ret = ptr::null(); ++ unsafe { ++ try_call!(raw::git_patch_get_line_in_hunk(&mut ret, ++ self.raw, ++ hunk_idx, ++ line_of_hunk)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Get the size of a Patch's diff data in bytes. ++ pub fn size(&self, ++ include_context: bool, ++ include_hunk_headers: bool, ++ include_file_headers: bool) -> usize { ++ unsafe { ++ raw::git_patch_size(self.raw, ++ include_context as c_int, ++ include_hunk_headers as c_int, ++ include_file_headers as c_int) ++ } ++ } ++ ++ /// Print the Patch to text via a callback. ++ pub fn print(&mut self, mut line_cb: &mut LineCb) -> Result<(), Error> { ++ let ptr = &mut line_cb as *mut _ as *mut c_void; ++ unsafe { ++ try_call!(raw::git_patch_print(self.raw, print_cb, ptr)); ++ Ok(()) ++ } ++ } ++ ++ /// Get the Patch text as a Buf. ++ pub fn to_buf(&mut self) -> Result { ++ let buf = Buf::new(); ++ unsafe { ++ try_call!(raw::git_patch_to_buf(buf.raw(), self.raw)); ++ } ++ Ok(buf) ++ } ++} diff --cc vendor/git2-0.7.5/src/pathspec.rs index 000000000,000000000..5cc03ab3b new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/pathspec.rs @@@ -1,0 -1,0 +1,301 @@@ ++use std::iter::IntoIterator; ++use std::marker; ++use std::ops::Range; ++use std::path::Path; ++use std::ptr; ++use libc::size_t; ++ ++use {raw, Error, Diff, Tree, PathspecFlags, Index, Repository, DiffDelta, IntoCString}; ++use util::Binding; ++ ++/// Structure representing a compiled pathspec used for matching against various ++/// structures. 
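++///
++/// # Examples
++///
++/// A small sketch of compiling and matching a pathspec (mirrors the
++/// tests below):
++///
++/// ```
++/// use git2::{Pathspec, PathspecFlags};
++/// use std::path::Path;
++///
++/// let ps = Pathspec::new(["src"].iter()).unwrap();
++/// assert!(ps.matches_path(Path::new("src/lib.rs"), PathspecFlags::DEFAULT));
++/// assert!(!ps.matches_path(Path::new("tests/smoke.rs"), PathspecFlags::DEFAULT));
++/// ```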
++pub struct Pathspec { ++ raw: *mut raw::git_pathspec, ++} ++ ++/// List of filenames matching a pathspec. ++pub struct PathspecMatchList<'ps> { ++ raw: *mut raw::git_pathspec_match_list, ++ _marker: marker::PhantomData<&'ps Pathspec>, ++} ++ ++/// Iterator over the matched paths in a pathspec. ++pub struct PathspecEntries<'list> { ++ range: Range, ++ list: &'list PathspecMatchList<'list>, ++} ++ ++/// Iterator over the matching diff deltas. ++pub struct PathspecDiffEntries<'list> { ++ range: Range, ++ list: &'list PathspecMatchList<'list>, ++} ++ ++/// Iterator over the failed list of pathspec items that did not match. ++pub struct PathspecFailedEntries<'list> { ++ range: Range, ++ list: &'list PathspecMatchList<'list>, ++} ++ ++impl Pathspec { ++ /// Creates a new pathspec from a list of specs to match against. ++ pub fn new(specs: I) -> Result ++ where T: IntoCString, I: IntoIterator { ++ let (_a, _b, arr) = try!(::util::iter2cstrs(specs)); ++ unsafe { ++ let mut ret = ptr::null_mut(); ++ try_call!(raw::git_pathspec_new(&mut ret, &arr)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Match a pathspec against files in a diff. ++ /// ++ /// The list returned contains the list of all matched filenames (unless you ++ /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the ++ /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is ++ /// specified. ++ pub fn match_diff(&self, diff: &Diff, flags: PathspecFlags) ++ -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_pathspec_match_diff(&mut ret, diff.raw(), ++ flags.bits(), self.raw)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Match a pathspec against files in a tree. ++ /// ++ /// The list returned contains the list of all matched filenames (unless you ++ /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the ++ /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is ++ /// specified. ++ pub fn match_tree(&self, tree: &Tree, flags: PathspecFlags) ++ -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_pathspec_match_tree(&mut ret, tree.raw(), ++ flags.bits(), self.raw)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// This matches the pathspec against the files in the repository index. ++ /// ++ /// The list returned contains the list of all matched filenames (unless you ++ /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the ++ /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is ++ /// specified. ++ pub fn match_index(&self, index: &Index, flags: PathspecFlags) ++ -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_pathspec_match_index(&mut ret, index.raw(), ++ flags.bits(), self.raw)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Match a pathspec against the working directory of a repository. ++ /// ++ /// This matches the pathspec against the current files in the working ++ /// directory of the repository. It is an error to invoke this on a bare ++ /// repo. This handles git ignores (i.e. ignored files will not be ++ /// considered to match the pathspec unless the file is tracked in the ++ /// index). ++ /// ++ /// The list returned contains the list of all matched filenames (unless you ++ /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the ++ /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is ++ /// specified. 
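++ ///
++ /// # Examples
++ ///
++ /// A sketch that lists matching paths, assuming a non-bare repository:
++ ///
++ /// ```no_run
++ /// fn list_rust_files(repo: &git2::Repository) -> Result<(), git2::Error> {
++ ///     let ps = git2::Pathspec::new(["*.rs"].iter())?;
++ ///     let list = ps.match_workdir(repo, git2::PathspecFlags::DEFAULT)?;
++ ///     for path in list.entries() {
++ ///         println!("{}", String::from_utf8_lossy(path));
++ ///     }
++ ///     Ok(())
++ /// }
++ /// ```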
++ pub fn match_workdir(&self, repo: &Repository, flags: PathspecFlags)
++ -> Result<PathspecMatchList, Error> {
++ let mut ret = ptr::null_mut();
++ unsafe {
++ try_call!(raw::git_pathspec_match_workdir(&mut ret, repo.raw(),
++ flags.bits(), self.raw));
++ Ok(Binding::from_raw(ret))
++ }
++ }
++
++ /// Try to match a path against a pathspec
++ ///
++ /// Unlike most of the other pathspec matching functions, this will not fall
++ /// back on the native case-sensitivity for your platform. You must
++ /// explicitly pass flags to control case sensitivity or else this will fall
++ /// back on being case sensitive.
++ pub fn matches_path(&self, path: &Path, flags: PathspecFlags) -> bool {
++ let path = path.into_c_string().unwrap();
++ unsafe {
++ raw::git_pathspec_matches_path(&*self.raw, flags.bits(),
++ path.as_ptr()) == 1
++ }
++ }
++}
++
++impl Binding for Pathspec {
++ type Raw = *mut raw::git_pathspec;
++
++ unsafe fn from_raw(raw: *mut raw::git_pathspec) -> Pathspec {
++ Pathspec { raw: raw }
++ }
++ fn raw(&self) -> *mut raw::git_pathspec { self.raw }
++}
++
++impl Drop for Pathspec {
++ fn drop(&mut self) {
++ unsafe { raw::git_pathspec_free(self.raw) }
++ }
++}
++
++impl<'ps> PathspecMatchList<'ps> {
++ fn entrycount(&self) -> usize {
++ unsafe { raw::git_pathspec_match_list_entrycount(&*self.raw) as usize }
++ }
++
++ fn failed_entrycount(&self) -> usize {
++ unsafe { raw::git_pathspec_match_list_failed_entrycount(&*self.raw) as usize }
++ }
++
++ /// Returns an iterator over the matching filenames in this list.
++ pub fn entries(&self) -> PathspecEntries {
++ let n = self.entrycount();
++ let n = if n > 0 && self.entry(0).is_none() {0} else {n};
++ PathspecEntries { range: 0..n, list: self }
++ }
++
++ /// Get a matching filename by position.
++ ///
++ /// If this list was generated from a diff, then the return value will
++ /// always be `None`.
++ pub fn entry(&self, i: usize) -> Option<&[u8]> {
++ unsafe {
++ let ptr = raw::git_pathspec_match_list_entry(&*self.raw, i as size_t);
++ ::opt_bytes(self, ptr)
++ }
++ }
++
++ /// Returns an iterator over the matching diff entries in this list.
++ pub fn diff_entries(&self) -> PathspecDiffEntries {
++ let n = self.entrycount();
++ let n = if n > 0 && self.diff_entry(0).is_none() {0} else {n};
++ PathspecDiffEntries { range: 0..n, list: self }
++ }
++
++ /// Get a matching diff delta by position.
++ ///
++ /// If the list was not generated from a diff, then the return value will
++ /// always be `None`.
++ pub fn diff_entry(&self, i: usize) -> Option<DiffDelta> {
++ unsafe {
++ let ptr = raw::git_pathspec_match_list_diff_entry(&*self.raw,
++ i as size_t);
++ Binding::from_raw_opt(ptr as *mut _)
++ }
++ }
++
++ /// Returns an iterator over the non-matching entries in this list.
++ pub fn failed_entries(&self) -> PathspecFailedEntries {
++ let n = self.failed_entrycount();
++ let n = if n > 0 && self.failed_entry(0).is_none() {0} else {n};
++ PathspecFailedEntries { range: 0..n, list: self }
++ }
++
++ /// Get an original pathspec string that had no matches.
++ pub fn failed_entry(&self, i: usize) -> Option<&[u8]> { ++ unsafe { ++ let ptr = raw::git_pathspec_match_list_failed_entry(&*self.raw, ++ i as size_t); ++ ::opt_bytes(self, ptr) ++ } ++ } ++} ++ ++impl<'ps> Binding for PathspecMatchList<'ps> { ++ type Raw = *mut raw::git_pathspec_match_list; ++ ++ unsafe fn from_raw(raw: *mut raw::git_pathspec_match_list) ++ -> PathspecMatchList<'ps> { ++ PathspecMatchList { raw: raw, _marker: marker::PhantomData } ++ } ++ fn raw(&self) -> *mut raw::git_pathspec_match_list { self.raw } ++} ++ ++impl<'ps> Drop for PathspecMatchList<'ps> { ++ fn drop(&mut self) { ++ unsafe { raw::git_pathspec_match_list_free(self.raw) } ++ } ++} ++ ++impl<'list> Iterator for PathspecEntries<'list> { ++ type Item = &'list [u8]; ++ fn next(&mut self) -> Option<&'list [u8]> { ++ self.range.next().and_then(|i| self.list.entry(i)) ++ } ++ fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } ++} ++impl<'list> DoubleEndedIterator for PathspecEntries<'list> { ++ fn next_back(&mut self) -> Option<&'list [u8]> { ++ self.range.next_back().and_then(|i| self.list.entry(i)) ++ } ++} ++impl<'list> ExactSizeIterator for PathspecEntries<'list> {} ++ ++impl<'list> Iterator for PathspecDiffEntries<'list> { ++ type Item = DiffDelta<'list>; ++ fn next(&mut self) -> Option> { ++ self.range.next().and_then(|i| self.list.diff_entry(i)) ++ } ++ fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } ++} ++impl<'list> DoubleEndedIterator for PathspecDiffEntries<'list> { ++ fn next_back(&mut self) -> Option> { ++ self.range.next_back().and_then(|i| self.list.diff_entry(i)) ++ } ++} ++impl<'list> ExactSizeIterator for PathspecDiffEntries<'list> {} ++ ++impl<'list> Iterator for PathspecFailedEntries<'list> { ++ type Item = &'list [u8]; ++ fn next(&mut self) -> Option<&'list [u8]> { ++ self.range.next().and_then(|i| self.list.failed_entry(i)) ++ } ++ fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } ++} ++impl<'list> DoubleEndedIterator for PathspecFailedEntries<'list> { ++ fn next_back(&mut self) -> Option<&'list [u8]> { ++ self.range.next_back().and_then(|i| self.list.failed_entry(i)) ++ } ++} ++impl<'list> ExactSizeIterator for PathspecFailedEntries<'list> {} ++ ++#[cfg(test)] ++mod tests { ++ use PathspecFlags; ++ use super::Pathspec; ++ use std::fs::File; ++ use std::path::Path; ++ ++ #[test] ++ fn smoke() { ++ let ps = Pathspec::new(["a"].iter()).unwrap(); ++ assert!(ps.matches_path(Path::new("a"), PathspecFlags::DEFAULT)); ++ assert!(ps.matches_path(Path::new("a/b"), PathspecFlags::DEFAULT)); ++ assert!(!ps.matches_path(Path::new("b"), PathspecFlags::DEFAULT)); ++ assert!(!ps.matches_path(Path::new("ab/c"), PathspecFlags::DEFAULT)); ++ ++ let (td, repo) = ::test::repo_init(); ++ let list = ps.match_workdir(&repo, PathspecFlags::DEFAULT).unwrap(); ++ assert_eq!(list.entries().len(), 0); ++ assert_eq!(list.diff_entries().len(), 0); ++ assert_eq!(list.failed_entries().len(), 0); ++ ++ File::create(&td.path().join("a")).unwrap(); ++ ++ let list = ps.match_workdir(&repo, ::PathspecFlags::FIND_FAILURES).unwrap(); ++ assert_eq!(list.entries().len(), 1); ++ assert_eq!(list.entries().next(), Some("a".as_bytes())); ++ } ++} diff --cc vendor/git2-0.7.5/src/proxy_options.rs index 000000000,000000000..e1601749b new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/proxy_options.rs @@@ -1,0 -1,0 +1,56 @@@ ++use std::ffi::CString; ++use std::marker; ++use std::ptr; ++ ++use raw; ++use util::Binding; ++ ++/// Options which can be specified to various fetch 
operations. ++#[derive(Default)] ++pub struct ProxyOptions<'a> { ++ url: Option, ++ proxy_kind: raw::git_proxy_t, ++ _marker: marker::PhantomData<&'a i32>, ++} ++ ++impl<'a> ProxyOptions<'a> { ++ /// Creates a new set of proxy options ready to be configured. ++ pub fn new() -> ProxyOptions<'a> { ++ Default::default() ++ } ++ ++ /// Try to auto-detect the proxy from the git configuration. ++ /// ++ /// Note that this will override `url` specified before. ++ pub fn auto(&mut self) -> &mut Self { ++ self.proxy_kind = raw::GIT_PROXY_AUTO; ++ self ++ } ++ ++ /// Specify the exact URL of the proxy to use. ++ /// ++ /// Note that this will override `auto` specified before. ++ pub fn url(&mut self, url: &str) -> &mut Self { ++ self.proxy_kind = raw::GIT_PROXY_SPECIFIED; ++ self.url = Some(CString::new(url).unwrap()); ++ self ++ } ++} ++ ++impl<'a> Binding for ProxyOptions<'a> { ++ type Raw = raw::git_proxy_options; ++ unsafe fn from_raw(_raw: raw::git_proxy_options) -> ProxyOptions<'a> { ++ panic!("can't create proxy from raw options") ++ } ++ ++ fn raw(&self) -> raw::git_proxy_options { ++ raw::git_proxy_options { ++ version: raw::GIT_PROXY_OPTIONS_VERSION, ++ kind: self.proxy_kind, ++ url: self.url.as_ref().map(|s| s.as_ptr()).unwrap_or(ptr::null()), ++ credentials: None, ++ certificate_check: None, ++ payload: ptr::null_mut(), ++ } ++ } ++} diff --cc vendor/git2-0.7.5/src/reference.rs index 000000000,000000000..265d76614 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/reference.rs @@@ -1,0 -1,0 +1,401 @@@ ++use std::cmp::Ordering; ++use std::ffi::CString; ++use std::marker; ++use std::mem; ++use std::ptr; ++use std::str; ++ ++use {raw, Error, Oid, Repository, ReferenceType, Object, ObjectType, Blob, Commit, Tree, Tag}; ++use object::CastOrPanic; ++use util::Binding; ++ ++struct Refdb<'repo>(&'repo Repository); ++ ++/// A structure to represent a git [reference][1]. ++/// ++/// [1]: http://git-scm.com/book/en/Git-Internals-Git-References ++pub struct Reference<'repo> { ++ raw: *mut raw::git_reference, ++ _marker: marker::PhantomData>, ++} ++ ++/// An iterator over the references in a repository. ++pub struct References<'repo> { ++ raw: *mut raw::git_reference_iterator, ++ _marker: marker::PhantomData>, ++} ++ ++/// An iterator over the names of references in a repository. ++pub struct ReferenceNames<'repo: 'references, 'references> { ++ inner: &'references mut References<'repo>, ++} ++ ++impl<'repo> Reference<'repo> { ++ /// Ensure the reference name is well-formed. ++ pub fn is_valid_name(refname: &str) -> bool { ++ ::init(); ++ let refname = CString::new(refname).unwrap(); ++ unsafe { raw::git_reference_is_valid_name(refname.as_ptr()) == 1 } ++ } ++ ++ /// Get access to the underlying raw pointer. ++ pub fn raw(&self) -> *mut raw::git_reference { self.raw } ++ ++ /// Delete an existing reference. ++ /// ++ /// This method works for both direct and symbolic references. The reference ++ /// will be immediately removed on disk. ++ /// ++ /// This function will return an error if the reference has changed from the ++ /// time it was looked up. ++ pub fn delete(&mut self) -> Result<(), Error> { ++ unsafe { try_call!(raw::git_reference_delete(self.raw)); } ++ Ok(()) ++ } ++ ++ /// Check if a reference is a local branch. ++ pub fn is_branch(&self) -> bool { ++ unsafe { raw::git_reference_is_branch(&*self.raw) == 1 } ++ } ++ ++ /// Check if a reference is a note. 
++ pub fn is_note(&self) -> bool { ++ unsafe { raw::git_reference_is_note(&*self.raw) == 1 } ++ } ++ ++ /// Check if a reference is a remote tracking branch ++ pub fn is_remote(&self) -> bool { ++ unsafe { raw::git_reference_is_remote(&*self.raw) == 1 } ++ } ++ ++ /// Check if a reference is a tag ++ pub fn is_tag(&self) -> bool { ++ unsafe { raw::git_reference_is_tag(&*self.raw) == 1 } ++ } ++ ++ /// Get the reference type of a reference. ++ /// ++ /// If the type is unknown, then `None` is returned. ++ pub fn kind(&self) -> Option { ++ ReferenceType::from_raw(unsafe { raw::git_reference_type(&*self.raw) }) ++ } ++ ++ /// Get the full name of a reference. ++ /// ++ /// Returns `None` if the name is not valid utf-8. ++ pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() } ++ ++ /// Get the full name of a reference. ++ pub fn name_bytes(&self) -> &[u8] { ++ unsafe { ::opt_bytes(self, raw::git_reference_name(&*self.raw)).unwrap() } ++ } ++ ++ /// Get the full shorthand of a reference. ++ /// ++ /// This will transform the reference name into a name "human-readable" ++ /// version. If no shortname is appropriate, it will return the full name. ++ /// ++ /// Returns `None` if the shorthand is not valid utf-8. ++ pub fn shorthand(&self) -> Option<&str> { ++ str::from_utf8(self.shorthand_bytes()).ok() ++ } ++ ++ /// Get the full shorthand of a reference. ++ pub fn shorthand_bytes(&self) -> &[u8] { ++ unsafe { ++ ::opt_bytes(self, raw::git_reference_shorthand(&*self.raw)).unwrap() ++ } ++ } ++ ++ /// Get the OID pointed to by a direct reference. ++ /// ++ /// Only available if the reference is direct (i.e. an object id reference, ++ /// not a symbolic one). ++ pub fn target(&self) -> Option { ++ unsafe { ++ Binding::from_raw_opt(raw::git_reference_target(&*self.raw)) ++ } ++ } ++ ++ /// Return the peeled OID target of this reference. ++ /// ++ /// This peeled OID only applies to direct references that point to a hard ++ /// Tag object: it is the result of peeling such Tag. ++ pub fn target_peel(&self) -> Option { ++ unsafe { ++ Binding::from_raw_opt(raw::git_reference_target_peel(&*self.raw)) ++ } ++ } ++ ++ /// Get full name to the reference pointed to by a symbolic reference. ++ /// ++ /// May return `None` if the reference is either not symbolic or not a ++ /// valid utf-8 string. ++ pub fn symbolic_target(&self) -> Option<&str> { ++ self.symbolic_target_bytes().and_then(|s| str::from_utf8(s).ok()) ++ } ++ ++ /// Get full name to the reference pointed to by a symbolic reference. ++ /// ++ /// Only available if the reference is symbolic. ++ pub fn symbolic_target_bytes(&self) -> Option<&[u8]> { ++ unsafe { ::opt_bytes(self, raw::git_reference_symbolic_target(&*self.raw)) } ++ } ++ ++ /// Resolve a symbolic reference to a direct reference. ++ /// ++ /// This method iteratively peels a symbolic reference until it resolves to ++ /// a direct reference to an OID. ++ /// ++ /// If a direct reference is passed as an argument, a copy of that ++ /// reference is returned. ++ pub fn resolve(&self) -> Result, Error> { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_reference_resolve(&mut raw, &*self.raw)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Peel a reference to an object ++ /// ++ /// This method recursively peels the reference until it reaches ++ /// an object of the specified type. 
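++ ///
++ /// # Examples
++ ///
++ /// A sketch of peeling `HEAD` to a commit object:
++ ///
++ /// ```no_run
++ /// fn head_commit_id(repo: &git2::Repository) -> Result<git2::Oid, git2::Error> {
++ ///     let head = repo.head()?;
++ ///     let obj = head.peel(git2::ObjectType::Commit)?;
++ ///     Ok(obj.id())
++ /// }
++ /// ```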
++ pub fn peel(&self, kind: ObjectType) -> Result<Object<'repo>, Error> {
++ let mut raw = ptr::null_mut();
++ unsafe {
++ try_call!(raw::git_reference_peel(&mut raw, self.raw, kind));
++ Ok(Binding::from_raw(raw))
++ }
++ }
++
++ /// Peel a reference to a blob
++ ///
++ /// This method recursively peels the reference until it reaches
++ /// a blob.
++ pub fn peel_to_blob(&self) -> Result<Blob<'repo>, Error> {
++ Ok(try!(self.peel(ObjectType::Blob)).cast_or_panic(ObjectType::Blob))
++ }
++
++ /// Peel a reference to a commit
++ ///
++ /// This method recursively peels the reference until it reaches
++ /// a commit.
++ pub fn peel_to_commit(&self) -> Result<Commit<'repo>, Error> {
++ Ok(try!(self.peel(ObjectType::Commit)).cast_or_panic(ObjectType::Commit))
++ }
++
++ /// Peel a reference to a tree
++ ///
++ /// This method recursively peels the reference until it reaches
++ /// a tree.
++ pub fn peel_to_tree(&self) -> Result<Tree<'repo>, Error> {
++ Ok(try!(self.peel(ObjectType::Tree)).cast_or_panic(ObjectType::Tree))
++ }
++
++ /// Peel a reference to a tag
++ ///
++ /// This method recursively peels the reference until it reaches
++ /// a tag.
++ pub fn peel_to_tag(&self) -> Result<Tag<'repo>, Error> {
++ Ok(try!(self.peel(ObjectType::Tag)).cast_or_panic(ObjectType::Tag))
++ }
++
++ /// Rename an existing reference.
++ ///
++ /// This method works for both direct and symbolic references.
++ ///
++ /// If the force flag is not enabled, and there's already a reference with
++ /// the given name, the renaming will fail.
++ pub fn rename(&mut self, new_name: &str, force: bool,
++ msg: &str) -> Result<Reference<'repo>, Error> {
++ let mut raw = ptr::null_mut();
++ let new_name = try!(CString::new(new_name));
++ let msg = try!(CString::new(msg));
++ unsafe {
++ try_call!(raw::git_reference_rename(&mut raw, self.raw, new_name,
++ force, msg));
++ Ok(Binding::from_raw(raw))
++ }
++ }
++
++ /// Conditionally create a new reference with the same name as the given
++ /// reference but a different OID target. The reference must be a direct
++ /// reference, otherwise this will fail.
++ ///
++ /// The new reference will be written to disk, overwriting the given
++ /// reference.
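++ ///
++ /// # Examples
++ ///
++ /// A sketch of fast-forwarding a branch reference to another commit;
++ /// both references are assumed to exist:
++ ///
++ /// ```no_run
++ /// fn reset_feature(repo: &git2::Repository) -> Result<(), git2::Error> {
++ ///     let id = repo.refname_to_id("refs/heads/master")?;
++ ///     let mut feature = repo.find_reference("refs/heads/feature")?;
++ ///     feature.set_target(id, "reset feature to master")?;
++ ///     Ok(())
++ /// }
++ /// ```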
++ pub fn set_target(&mut self, id: Oid, reflog_msg: &str) ++ -> Result, Error> { ++ let mut raw = ptr::null_mut(); ++ let msg = try!(CString::new(reflog_msg)); ++ unsafe { ++ try_call!(raw::git_reference_set_target(&mut raw, self.raw, ++ id.raw(), msg)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++} ++ ++impl<'repo> PartialOrd for Reference<'repo> { ++ fn partial_cmp(&self, other: &Reference<'repo>) -> Option { ++ Some(self.cmp(other)) ++ } ++} ++ ++impl<'repo> Ord for Reference<'repo> { ++ fn cmp(&self, other: &Reference<'repo>) -> Ordering { ++ match unsafe { raw::git_reference_cmp(&*self.raw, &*other.raw) } { ++ 0 => Ordering::Equal, ++ n if n < 0 => Ordering::Less, ++ _ => Ordering::Greater, ++ } ++ } ++} ++ ++impl<'repo> PartialEq for Reference<'repo> { ++ fn eq(&self, other: &Reference<'repo>) -> bool { ++ self.cmp(other) == Ordering::Equal ++ } ++} ++ ++impl<'repo> Eq for Reference<'repo> {} ++ ++impl<'repo> Binding for Reference<'repo> { ++ type Raw = *mut raw::git_reference; ++ unsafe fn from_raw(raw: *mut raw::git_reference) -> Reference<'repo> { ++ Reference { raw: raw, _marker: marker::PhantomData } ++ } ++ fn raw(&self) -> *mut raw::git_reference { self.raw } ++} ++ ++impl<'repo> Drop for Reference<'repo> { ++ fn drop(&mut self) { ++ unsafe { raw::git_reference_free(self.raw) } ++ } ++} ++ ++impl<'repo> References<'repo> { ++ /// Consumes a `References` iterator to create an iterator over just the ++ /// name of some references. ++ /// ++ /// This is more efficient if only the names are desired of references as ++ /// the references themselves don't have to be allocated and deallocated. ++ /// ++ /// The returned iterator will yield strings as opposed to a `Reference`. ++ pub fn names<'a>(&'a mut self) -> ReferenceNames<'repo, 'a> { ++ ReferenceNames { inner: self } ++ } ++} ++ ++impl<'repo> Binding for References<'repo> { ++ type Raw = *mut raw::git_reference_iterator; ++ unsafe fn from_raw(raw: *mut raw::git_reference_iterator) ++ -> References<'repo> { ++ References { raw: raw, _marker: marker::PhantomData } ++ } ++ fn raw(&self) -> *mut raw::git_reference_iterator { self.raw } ++} ++ ++impl<'repo> Iterator for References<'repo> { ++ type Item = Result, Error>; ++ fn next(&mut self) -> Option, Error>> { ++ let mut out = ptr::null_mut(); ++ unsafe { ++ try_call_iter!(raw::git_reference_next(&mut out, self.raw)); ++ Some(Ok(Binding::from_raw(out))) ++ } ++ } ++} ++ ++impl<'repo> Drop for References<'repo> { ++ fn drop(&mut self) { ++ unsafe { raw::git_reference_iterator_free(self.raw) } ++ } ++} ++ ++impl<'repo, 'references> Iterator for ReferenceNames<'repo, 'references> { ++ type Item = Result<&'references str, Error>; ++ fn next(&mut self) -> Option> { ++ let mut out = ptr::null(); ++ unsafe { ++ try_call_iter!(raw::git_reference_next_name(&mut out, ++ self.inner.raw)); ++ let bytes = ::opt_bytes(self, out).unwrap(); ++ let s = str::from_utf8(bytes).unwrap(); ++ Some(Ok(mem::transmute::<&str, &'references str>(s))) ++ } ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use {Reference, ObjectType, ReferenceType}; ++ ++ #[test] ++ fn smoke() { ++ assert!(Reference::is_valid_name("refs/foo")); ++ assert!(!Reference::is_valid_name("foo")); ++ } ++ ++ #[test] ++ fn smoke2() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut head = repo.head().unwrap(); ++ assert!(head.is_branch()); ++ assert!(!head.is_remote()); ++ assert!(!head.is_tag()); ++ assert!(!head.is_note()); ++ ++ // HEAD is a symbolic reference but git_repository_head resolves it ++ // so it is a GIT_REF_OID. 
++ assert_eq!(head.kind().unwrap(), ReferenceType::Oid); ++ ++ assert!(head == repo.head().unwrap()); ++ assert_eq!(head.name(), Some("refs/heads/master")); ++ ++ assert!(head == repo.find_reference("refs/heads/master").unwrap()); ++ assert_eq!(repo.refname_to_id("refs/heads/master").unwrap(), ++ head.target().unwrap()); ++ ++ assert!(head.symbolic_target().is_none()); ++ assert!(head.target_peel().is_none()); ++ ++ assert_eq!(head.shorthand(), Some("master")); ++ assert!(head.resolve().unwrap() == head); ++ ++ let mut tag1 = repo.reference("refs/tags/tag1", ++ head.target().unwrap(), ++ false, "test").unwrap(); ++ assert!(tag1.is_tag()); ++ assert_eq!(tag1.kind().unwrap(), ReferenceType::Oid); ++ ++ let peeled_commit = tag1.peel(ObjectType::Commit).unwrap(); ++ assert_eq!(ObjectType::Commit, peeled_commit.kind().unwrap()); ++ assert_eq!(tag1.target().unwrap(), peeled_commit.id()); ++ ++ tag1.delete().unwrap(); ++ ++ let mut sym1 = repo.reference_symbolic("refs/tags/tag1", ++ "refs/heads/master", false, ++ "test").unwrap(); ++ assert_eq!(sym1.kind().unwrap(), ReferenceType::Symbolic); ++ sym1.delete().unwrap(); ++ ++ { ++ assert!(repo.references().unwrap().count() == 1); ++ assert!(repo.references().unwrap().next().unwrap().unwrap() == head); ++ let mut names = repo.references().unwrap(); ++ let mut names = names.names(); ++ assert_eq!(names.next().unwrap().unwrap(), "refs/heads/master"); ++ assert!(names.next().is_none()); ++ assert!(repo.references_glob("foo").unwrap().count() == 0); ++ assert!(repo.references_glob("refs/heads/*").unwrap().count() == 1); ++ } ++ ++ let mut head = head.rename("refs/foo", true, "test").unwrap(); ++ head.delete().unwrap(); ++ ++ } ++} diff --cc vendor/git2-0.7.5/src/reflog.rs index 000000000,000000000..6996a89da new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/reflog.rs @@@ -1,0 -1,0 +1,172 @@@ ++use std::ops::Range; ++use std::marker; ++use std::str; ++use libc::size_t; ++ ++use {raw, signature, Oid, Error, Signature}; ++use util::Binding; ++ ++/// A reference log of a git repository. ++pub struct Reflog { ++ raw: *mut raw::git_reflog, ++} ++ ++/// An entry inside the reflog of a repository ++pub struct ReflogEntry<'reflog> { ++ raw: *const raw::git_reflog_entry, ++ _marker: marker::PhantomData<&'reflog Reflog>, ++} ++ ++/// An iterator over the entries inside of a reflog. ++pub struct ReflogIter<'reflog> { ++ range: Range, ++ reflog: &'reflog Reflog, ++} ++ ++impl Reflog { ++ /// Add a new entry to the in-memory reflog. ++ pub fn append(&mut self, new_oid: Oid, committer: &Signature, ++ msg: Option<&str>) -> Result<(), Error> { ++ let msg = try!(::opt_cstr(msg)); ++ unsafe { ++ try_call!(raw::git_reflog_append(self.raw, new_oid.raw(), ++ committer.raw(), msg)); ++ } ++ Ok(()) ++ } ++ ++ /// Remove an entry from the reflog by its index ++ /// ++ /// To ensure there's no gap in the log history, set rewrite_previous_entry ++ /// param value to `true`. When deleting entry n, member old_oid of entry ++ /// n-1 (if any) will be updated with the value of member new_oid of entry ++ /// n+1. ++ pub fn remove(&mut self, i: usize, rewrite_previous_entry: bool) ++ -> Result<(), Error> { ++ unsafe { ++ try_call!(raw::git_reflog_drop(self.raw, i as size_t, ++ rewrite_previous_entry)); ++ } ++ Ok(()) ++ } ++ ++ /// Lookup an entry by its index ++ /// ++ /// Requesting the reflog entry with an index of 0 (zero) will return the ++ /// most recently created entry. 
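++ ///
++ /// # Examples
++ ///
++ /// Reading the most recent entry, assuming `HEAD` has a reflog:
++ ///
++ /// ```no_run
++ /// fn last_message(repo: &git2::Repository) -> Result<(), git2::Error> {
++ ///     let reflog = repo.reflog("HEAD")?;
++ ///     if let Some(entry) = reflog.get(0) {
++ ///         println!("{:?}", entry.message());
++ ///     }
++ ///     Ok(())
++ /// }
++ /// ```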
++ pub fn get(&self, i: usize) -> Option<ReflogEntry> {
++ unsafe {
++ let ptr = raw::git_reflog_entry_byindex(self.raw, i as size_t);
++ Binding::from_raw_opt(ptr)
++ }
++ }
++
++ /// Get the number of log entries in a reflog
++ pub fn len(&self) -> usize {
++ unsafe { raw::git_reflog_entrycount(self.raw) as usize }
++ }
++
++ /// Returns `true` if there are no log entries in the reflog
++ pub fn is_empty(&self) -> bool {
++ self.len() == 0
++ }
++
++ /// Get an iterator to all entries inside of this reflog
++ pub fn iter(&self) -> ReflogIter {
++ ReflogIter { range: 0..self.len(), reflog: self }
++ }
++
++ /// Write an existing in-memory reflog object back to disk using an atomic
++ /// file lock.
++ pub fn write(&mut self) -> Result<(), Error> {
++ unsafe { try_call!(raw::git_reflog_write(self.raw)); }
++ Ok(())
++ }
++}
++
++impl Binding for Reflog {
++ type Raw = *mut raw::git_reflog;
++
++ unsafe fn from_raw(raw: *mut raw::git_reflog) -> Reflog {
++ Reflog { raw: raw }
++ }
++ fn raw(&self) -> *mut raw::git_reflog { self.raw }
++}
++
++impl Drop for Reflog {
++ fn drop(&mut self) {
++ unsafe { raw::git_reflog_free(self.raw) }
++ }
++}
++
++impl<'reflog> ReflogEntry<'reflog> {
++ /// Get the committer of this entry
++ pub fn committer(&self) -> Signature {
++ unsafe {
++ let ptr = raw::git_reflog_entry_committer(self.raw);
++ signature::from_raw_const(self, ptr)
++ }
++ }
++
++ /// Get the new oid
++ pub fn id_new(&self) -> Oid {
++ unsafe { Binding::from_raw(raw::git_reflog_entry_id_new(self.raw)) }
++ }
++
++ /// Get the old oid
++ pub fn id_old(&self) -> Oid {
++ unsafe { Binding::from_raw(raw::git_reflog_entry_id_old(self.raw)) }
++ }
++
++ /// Get the log message, returning `None` on invalid UTF-8.
++ pub fn message(&self) -> Option<&str> {
++ self.message_bytes().and_then(|s| str::from_utf8(s).ok())
++ }
++
++ /// Get the log message as a byte array.
++ pub fn message_bytes(&self) -> Option<&[u8]> { ++ unsafe { ++ ::opt_bytes(self, raw::git_reflog_entry_message(self.raw)) ++ } ++ } ++} ++ ++impl<'reflog> Binding for ReflogEntry<'reflog> { ++ type Raw = *const raw::git_reflog_entry; ++ ++ unsafe fn from_raw(raw: *const raw::git_reflog_entry) -> ReflogEntry<'reflog> { ++ ReflogEntry { raw: raw, _marker: marker::PhantomData } ++ } ++ fn raw(&self) -> *const raw::git_reflog_entry { self.raw } ++} ++ ++impl<'reflog> Iterator for ReflogIter<'reflog> { ++ type Item = ReflogEntry<'reflog>; ++ fn next(&mut self) -> Option> { ++ self.range.next().and_then(|i| self.reflog.get(i)) ++ } ++ fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } ++} ++impl<'reflog> DoubleEndedIterator for ReflogIter<'reflog> { ++ fn next_back(&mut self) -> Option> { ++ self.range.next_back().and_then(|i| self.reflog.get(i)) ++ } ++} ++impl<'reflog> ExactSizeIterator for ReflogIter<'reflog> {} ++ ++#[cfg(test)] ++mod tests { ++ #[test] ++ fn smoke() { ++ let (_td, repo) = ::test::repo_init(); ++ let mut reflog = repo.reflog("HEAD").unwrap(); ++ assert_eq!(reflog.iter().len(), 1); ++ reflog.write().unwrap(); ++ ++ let entry = reflog.iter().next().unwrap(); ++ assert!(entry.message().is_some()); ++ ++ repo.reflog_rename("HEAD", "refs/heads/foo").unwrap(); ++ repo.reflog_delete("refs/heads/foo").unwrap(); ++ } ++} diff --cc vendor/git2-0.7.5/src/refspec.rs index 000000000,000000000..c814d23e5 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/refspec.rs @@@ -1,0 -1,0 +1,89 @@@ ++use std::ffi::CString; ++use std::marker; ++use std::str; ++ ++use {raw, Direction}; ++use util::Binding; ++ ++/// A structure to represent a git [refspec][1]. ++/// ++/// Refspecs are currently mainly accessed/created through a `Remote`. ++/// ++/// [1]: http://git-scm.com/book/en/Git-Internals-The-Refspec ++pub struct Refspec<'remote> { ++ raw: *const raw::git_refspec, ++ _marker: marker::PhantomData<&'remote raw::git_remote>, ++} ++ ++impl<'remote> Refspec<'remote> { ++ /// Get the refspec's direction. ++ pub fn direction(&self) -> Direction { ++ match unsafe { raw::git_refspec_direction(self.raw) } { ++ raw::GIT_DIRECTION_FETCH => Direction::Fetch, ++ raw::GIT_DIRECTION_PUSH => Direction::Push, ++ n => panic!("unknown refspec direction: {}", n), ++ } ++ } ++ ++ /// Get the destination specifier. ++ /// ++ /// If the destination is not utf-8, None is returned. ++ pub fn dst(&self) -> Option<&str> { ++ str::from_utf8(self.dst_bytes()).ok() ++ } ++ ++ /// Get the destination specifier, in bytes. ++ pub fn dst_bytes(&self) -> &[u8] { ++ unsafe { ::opt_bytes(self, raw::git_refspec_dst(self.raw)).unwrap() } ++ } ++ ++ /// Check if a refspec's destination descriptor matches a reference ++ pub fn dst_matches(&self, refname: &str) -> bool { ++ let refname = CString::new(refname).unwrap(); ++ unsafe { raw::git_refspec_dst_matches(self.raw, refname.as_ptr()) == 1 } ++ } ++ ++ /// Get the source specifier. ++ /// ++ /// If the source is not utf-8, None is returned. ++ pub fn src(&self) -> Option<&str> { ++ str::from_utf8(self.src_bytes()).ok() ++ } ++ ++ /// Get the source specifier, in bytes. 
++ pub fn src_bytes(&self) -> &[u8] {
++ unsafe { ::opt_bytes(self, raw::git_refspec_src(self.raw)).unwrap() }
++ }
++
++ /// Check if a refspec's source descriptor matches a reference
++ pub fn src_matches(&self, refname: &str) -> bool {
++ let refname = CString::new(refname).unwrap();
++ unsafe { raw::git_refspec_src_matches(self.raw, refname.as_ptr()) == 1 }
++ }
++
++ /// Get the force update setting.
++ pub fn is_force(&self) -> bool {
++ unsafe { raw::git_refspec_force(self.raw) == 1 }
++ }
++
++ /// Get the refspec's string.
++ ///
++ /// Returns None if the string is not valid utf8.
++ pub fn str(&self) -> Option<&str> {
++ str::from_utf8(self.bytes()).ok()
++ }
++
++ /// Get the refspec's string as a byte array
++ pub fn bytes(&self) -> &[u8] {
++ unsafe { ::opt_bytes(self, raw::git_refspec_string(self.raw)).unwrap() }
++ }
++}
++
++impl<'remote> Binding for Refspec<'remote> {
++ type Raw = *const raw::git_refspec;
++
++ unsafe fn from_raw(raw: *const raw::git_refspec) -> Refspec<'remote> {
++ Refspec { raw: raw, _marker: marker::PhantomData }
++ }
++ fn raw(&self) -> *const raw::git_refspec { self.raw }
++} diff --cc vendor/git2-0.7.5/src/remote.rs index 000000000,000000000..b385a1dfa new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/remote.rs @@@ -1,0 -1,0 +1,753 @@@ ++use std::ffi::CString;
++use std::ops::Range;
++use std::marker;
++use std::mem;
++use std::ptr;
++use std::slice;
++use std::str;
++use libc;
++
++use {raw, Direction, Error, Refspec, Oid, FetchPrune, ProxyOptions};
++use {RemoteCallbacks, Progress, Repository, AutotagOption};
++use string_array::StringArray;
++use util::Binding;
++
++/// A structure representing a [remote][1] of a git repository.
++///
++/// [1]: http://git-scm.com/book/en/Git-Basics-Working-with-Remotes
++///
++/// The lifetime is the lifetime of the repository that it is attached to. The
++/// remote is used to manage fetches and pushes as well as refspecs.
++pub struct Remote<'repo> {
++ raw: *mut raw::git_remote,
++ _marker: marker::PhantomData<&'repo Repository>,
++}
++
++/// An iterator over the refspecs that a remote contains.
++pub struct Refspecs<'remote> {
++ range: Range<usize>,
++ remote: &'remote Remote<'remote>,
++}
++
++/// Description of a reference advertised by a remote server, given out on calls
++/// to `list`.
++pub struct RemoteHead<'remote> {
++ raw: *const raw::git_remote_head,
++ _marker: marker::PhantomData<&'remote str>,
++}
++
++/// Options which can be specified to various fetch operations.
++pub struct FetchOptions<'cb> {
++ callbacks: Option<RemoteCallbacks<'cb>>,
++ proxy: Option<ProxyOptions<'cb>>,
++ prune: FetchPrune,
++ update_fetchhead: bool,
++ download_tags: AutotagOption,
++}
++
++/// Options to control the behavior of a git push.
++pub struct PushOptions<'cb> {
++ callbacks: Option<RemoteCallbacks<'cb>>,
++ proxy: Option<ProxyOptions<'cb>>,
++ pb_parallelism: u32,
++}
++
++/// Holds callbacks for a connection to a `Remote`. Disconnects when dropped.
++pub struct RemoteConnection<'repo, 'connection, 'cb> where 'repo: 'connection {
++ _callbacks: Box<RemoteCallbacks<'cb>>,
++ _proxy: ProxyOptions<'cb>,
++ remote: &'connection mut Remote<'repo>,
++}
++
++pub fn remote_into_raw(remote: Remote) -> *mut raw::git_remote {
++ let ret = remote.raw;
++ mem::forget(remote);
++ return ret
++}
++
++impl<'repo> Remote<'repo> {
++ /// Ensure the remote name is well-formed.
++ pub fn is_valid_name(remote_name: &str) -> bool {
++ ::init();
++ let remote_name = CString::new(remote_name).unwrap();
++ unsafe { raw::git_remote_is_valid_name(remote_name.as_ptr()) == 1 }
++ }
++
++ /// Get the remote's name.
++ /// ++ /// Returns `None` if this remote has not yet been named or if the name is ++ /// not valid utf-8 ++ pub fn name(&self) -> Option<&str> { ++ self.name_bytes().and_then(|s| str::from_utf8(s).ok()) ++ } ++ ++ /// Get the remote's name, in bytes. ++ /// ++ /// Returns `None` if this remote has not yet been named ++ pub fn name_bytes(&self) -> Option<&[u8]> { ++ unsafe { ::opt_bytes(self, raw::git_remote_name(&*self.raw)) } ++ } ++ ++ /// Get the remote's url. ++ /// ++ /// Returns `None` if the url is not valid utf-8 ++ pub fn url(&self) -> Option<&str> { ++ str::from_utf8(self.url_bytes()).ok() ++ } ++ ++ /// Get the remote's url as a byte array. ++ pub fn url_bytes(&self) -> &[u8] { ++ unsafe { ::opt_bytes(self, raw::git_remote_url(&*self.raw)).unwrap() } ++ } ++ ++ /// Get the remote's pushurl. ++ /// ++ /// Returns `None` if the pushurl is not valid utf-8 ++ pub fn pushurl(&self) -> Option<&str> { ++ self.pushurl_bytes().and_then(|s| str::from_utf8(s).ok()) ++ } ++ ++ /// Get the remote's pushurl as a byte array. ++ pub fn pushurl_bytes(&self) -> Option<&[u8]> { ++ unsafe { ::opt_bytes(self, raw::git_remote_pushurl(&*self.raw)) } ++ } ++ ++ /// Open a connection to a remote. ++ pub fn connect(&mut self, dir: Direction) -> Result<(), Error> { ++ // TODO: can callbacks be exposed safely? ++ unsafe { ++ try_call!(raw::git_remote_connect(self.raw, dir, ++ ptr::null(), ++ ptr::null(), ++ ptr::null())); ++ } ++ Ok(()) ++ } ++ ++ /// Open a connection to a remote with callbacks and proxy settings ++ /// ++ /// Returns a `RemoteConnection` that will disconnect once dropped ++ pub fn connect_auth<'connection, 'cb>(&'connection mut self, ++ dir: Direction, ++ cb: Option>, ++ proxy_options: Option>) ++ -> Result, Error> { ++ ++ let cb = Box::new(cb.unwrap_or_else(RemoteCallbacks::new)); ++ let proxy_options = proxy_options.unwrap_or_else(ProxyOptions::new); ++ unsafe { ++ try_call!(raw::git_remote_connect(self.raw, dir, ++ &cb.raw(), ++ &proxy_options.raw(), ++ ptr::null())); ++ } ++ ++ Ok(RemoteConnection { ++ _callbacks: cb, ++ _proxy: proxy_options, ++ remote: self, ++ }) ++ } ++ ++ /// Check whether the remote is connected ++ pub fn connected(&mut self) -> bool { ++ unsafe { raw::git_remote_connected(self.raw) == 1 } ++ } ++ ++ /// Disconnect from the remote ++ pub fn disconnect(&mut self) { ++ unsafe { raw::git_remote_disconnect(self.raw) } ++ } ++ ++ /// Download and index the packfile ++ /// ++ /// Connect to the remote if it hasn't been done yet, negotiate with the ++ /// remote git which objects are missing, download and index the packfile. ++ /// ++ /// The .idx file will be created and both it and the packfile with be ++ /// renamed to their final name. ++ /// ++ /// The `specs` argument is a list of refspecs to use for this negotiation ++ /// and download. Use an empty array to use the base refspecs. ++ pub fn download(&mut self, specs: &[&str], opts: Option<&mut FetchOptions>) ++ -> Result<(), Error> { ++ let (_a, _b, arr) = try!(::util::iter2cstrs(specs.iter())); ++ let raw = opts.map(|o| o.raw()); ++ unsafe { ++ try_call!(raw::git_remote_download(self.raw, &arr, raw.as_ref())); ++ } ++ Ok(()) ++ } ++ ++ /// Get the number of refspecs for a remote ++ pub fn refspecs<'a>(&'a self) -> Refspecs<'a> { ++ let cnt = unsafe { raw::git_remote_refspec_count(&*self.raw) as usize }; ++ Refspecs { range: 0..cnt, remote: self } ++ } ++ ++ /// Get the `nth` refspec from this remote. ++ /// ++ /// The `refspecs` iterator can be used to iterate over all refspecs. 
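++ ///
++ /// # Examples
++ ///
++ /// A sketch that walks every refspec, assuming an `origin` remote:
++ ///
++ /// ```no_run
++ /// fn print_refspecs(repo: &git2::Repository) -> Result<(), git2::Error> {
++ ///     let remote = repo.find_remote("origin")?;
++ ///     for spec in remote.refspecs() {
++ ///         println!("{:?}", spec.str());
++ ///     }
++ ///     Ok(())
++ /// }
++ /// ```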
++
++    /// Get the `nth` refspec from this remote.
++    ///
++    /// The `refspecs` iterator can be used to iterate over all refspecs.
++    pub fn get_refspec(&self, i: usize) -> Option<Refspec> {
++        unsafe {
++            let ptr = raw::git_remote_get_refspec(&*self.raw,
++                                                  i as libc::size_t);
++            Binding::from_raw_opt(ptr)
++        }
++    }
++
++    /// Download new data and update tips.
++    ///
++    /// Convenience function to connect to a remote, download the data,
++    /// disconnect, and update the remote-tracking branches.
++    ///
++    /// # Examples
++    ///
++    /// Example of functionality similar to `git fetch origin/master`:
++    ///
++    /// ```no_run
++    /// fn fetch_origin_master(repo: git2::Repository) -> Result<(), git2::Error> {
++    ///     repo.find_remote("origin")?.fetch(&["master"], None, None)
++    /// }
++    ///
++    /// let repo = git2::Repository::discover("rust").unwrap();
++    /// fetch_origin_master(repo).unwrap();
++    /// ```
++    pub fn fetch(&mut self,
++                 refspecs: &[&str],
++                 opts: Option<&mut FetchOptions>,
++                 reflog_msg: Option<&str>) -> Result<(), Error> {
++        let (_a, _b, arr) = try!(::util::iter2cstrs(refspecs.iter()));
++        let msg = try!(::opt_cstr(reflog_msg));
++        let raw = opts.map(|o| o.raw());
++        unsafe {
++            try_call!(raw::git_remote_fetch(self.raw, &arr, raw.as_ref(), msg));
++        }
++        Ok(())
++    }
++
++    /// Update the tips to the new state.
++    pub fn update_tips(&mut self,
++                       callbacks: Option<&mut RemoteCallbacks>,
++                       update_fetchhead: bool,
++                       download_tags: AutotagOption,
++                       msg: Option<&str>) -> Result<(), Error> {
++        let msg = try!(::opt_cstr(msg));
++        let cbs = callbacks.map(|cb| cb.raw());
++        unsafe {
++            try_call!(raw::git_remote_update_tips(self.raw, cbs.as_ref(),
++                                                  update_fetchhead,
++                                                  download_tags, msg));
++        }
++        Ok(())
++    }
++
++    /// Perform a push.
++    ///
++    /// Perform all the steps for a push. If no refspecs are passed then the
++    /// configured refspecs will be used.
++    ///
++    /// Note that you'll likely want to use `RemoteCallbacks` and set
++    /// `push_update_reference` to test whether all the references were pushed
++    /// successfully.
++    pub fn push(&mut self,
++                refspecs: &[&str],
++                opts: Option<&mut PushOptions>) -> Result<(), Error> {
++        let (_a, _b, arr) = try!(::util::iter2cstrs(refspecs.iter()));
++        let raw = opts.map(|o| o.raw());
++        unsafe {
++            try_call!(raw::git_remote_push(self.raw, &arr, raw.as_ref()));
++        }
++        Ok(())
++    }
++
++    /// Get the statistics structure that is filled in by the fetch operation.
++    pub fn stats(&self) -> Progress {
++        unsafe {
++            Binding::from_raw(raw::git_remote_stats(self.raw))
++        }
++    }
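A sketch of the push workflow described above, using `push_update_reference` to surface per-reference rejections; the remote name and refspec are assumptions:

    extern crate git2;

    fn push_master(repo: &git2::Repository) -> Result<(), git2::Error> {
        let mut remote = repo.find_remote("origin")?;
        let mut callbacks = git2::RemoteCallbacks::new();
        callbacks.push_update_reference(|refname, status| {
            match status {
                // `Some` carries the server's reason for rejecting this reference.
                Some(msg) => Err(git2::Error::from_str(&format!("{}: {}", refname, msg))),
                None => Ok(()),
            }
        });
        let mut opts = git2::PushOptions::new();
        opts.remote_callbacks(callbacks);
        remote.push(&["refs/heads/master:refs/heads/master"], Some(&mut opts))
    }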
++
++    /// Get the remote repository's reference advertisement list.
++    ///
++    /// Get the list of references with which the server responds to a new
++    /// connection.
++    ///
++    /// The remote (or more exactly its transport) must have connected to the
++    /// remote repository. This list is available as soon as the connection to
++    /// the remote is initiated and it remains available after disconnecting.
++    pub fn list(&self) -> Result<&[RemoteHead], Error> {
++        let mut size = 0;
++        let mut base = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_remote_ls(&mut base, &mut size, self.raw));
++            assert_eq!(mem::size_of::<RemoteHead>(),
++                       mem::size_of::<*const raw::git_remote_head>());
++            let slice = slice::from_raw_parts(base as *const _, size as usize);
++            Ok(mem::transmute::<&[*const raw::git_remote_head],
++                                &[RemoteHead]>(slice))
++        }
++    }
++
++    /// Get the remote's list of fetch refspecs.
++    pub fn fetch_refspecs(&self) -> Result<StringArray, Error> {
++        unsafe {
++            let mut raw: raw::git_strarray = mem::zeroed();
++            try_call!(raw::git_remote_get_fetch_refspecs(&mut raw, self.raw));
++            Ok(StringArray::from_raw(raw))
++        }
++    }
++
++    /// Get the remote's list of push refspecs.
++    pub fn push_refspecs(&self) -> Result<StringArray, Error> {
++        unsafe {
++            let mut raw: raw::git_strarray = mem::zeroed();
++            try_call!(raw::git_remote_get_push_refspecs(&mut raw, self.raw));
++            Ok(StringArray::from_raw(raw))
++        }
++    }
++}
++
++impl<'repo> Clone for Remote<'repo> {
++    fn clone(&self) -> Remote<'repo> {
++        let mut ret = ptr::null_mut();
++        let rc = unsafe { call!(raw::git_remote_dup(&mut ret, self.raw)) };
++        assert_eq!(rc, 0);
++        Remote {
++            raw: ret,
++            _marker: marker::PhantomData,
++        }
++    }
++}
++
++impl<'repo> Binding for Remote<'repo> {
++    type Raw = *mut raw::git_remote;
++
++    unsafe fn from_raw(raw: *mut raw::git_remote) -> Remote<'repo> {
++        Remote {
++            raw: raw,
++            _marker: marker::PhantomData,
++        }
++    }
++    fn raw(&self) -> *mut raw::git_remote { self.raw }
++}
++
++impl<'repo> Drop for Remote<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_remote_free(self.raw) }
++    }
++}
++
++impl<'repo> Iterator for Refspecs<'repo> {
++    type Item = Refspec<'repo>;
++    fn next(&mut self) -> Option<Refspec<'repo>> {
++        self.range.next().and_then(|i| self.remote.get_refspec(i))
++    }
++    fn size_hint(&self) -> (usize, Option<usize>) { self.range.size_hint() }
++}
++impl<'repo> DoubleEndedIterator for Refspecs<'repo> {
++    fn next_back(&mut self) -> Option<Refspec<'repo>> {
++        self.range.next_back().and_then(|i| self.remote.get_refspec(i))
++    }
++}
++impl<'repo> ExactSizeIterator for Refspecs<'repo> {}
++
++#[allow(missing_docs)] // not documented in libgit2 :(
++impl<'remote> RemoteHead<'remote> {
++    /// Flag if this is available locally.
++    pub fn is_local(&self) -> bool {
++        unsafe { (*self.raw).local != 0 }
++    }
++
++    pub fn oid(&self) -> Oid {
++        unsafe { Binding::from_raw(&(*self.raw).oid as *const _) }
++    }
++    pub fn loid(&self) -> Oid {
++        unsafe { Binding::from_raw(&(*self.raw).loid as *const _) }
++    }
++
++    pub fn name(&self) -> &str {
++        let b = unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() };
++        str::from_utf8(b).unwrap()
++    }
++
++    pub fn symref_target(&self) -> Option<&str> {
++        let b = unsafe { ::opt_bytes(self, (*self.raw).symref_target) };
++        b.map(|b| str::from_utf8(b).unwrap())
++    }
++}
++
++impl<'cb> Default for FetchOptions<'cb> {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl<'cb> FetchOptions<'cb> {
++    /// Creates a new blank set of fetch options.
++    pub fn new() -> FetchOptions<'cb> {
++        FetchOptions {
++            callbacks: None,
++            proxy: None,
++            prune: FetchPrune::Unspecified,
++            update_fetchhead: true,
++            download_tags: AutotagOption::Unspecified,
++        }
++    }
++
++    /// Set the callbacks to use for the fetch operation.
++    pub fn remote_callbacks(&mut self, cbs: RemoteCallbacks<'cb>) -> &mut Self {
++        self.callbacks = Some(cbs);
++        self
++    }
++
++    /// Set the proxy options to use for the fetch operation.
++    pub fn proxy_options(&mut self, opts: ProxyOptions<'cb>) -> &mut Self {
++        self.proxy = Some(opts);
++        self
++    }
++
++    /// Set whether to perform a prune after the fetch.
++    pub fn prune(&mut self, prune: FetchPrune) -> &mut Self {
++        self.prune = prune;
++        self
++    }
++
++    /// Set whether to write the results to FETCH_HEAD.
++    ///
++    /// Defaults to `true`.
++    pub fn update_fetchhead(&mut self, update: bool) -> &mut Self {
++        self.update_fetchhead = update;
++        self
++    }
++
++    /// Set how to behave regarding tags on the remote, such as auto-downloading
++    /// tags for objects we're downloading or downloading all of them.
++    ///
++    /// The default is to auto-follow tags.
++    pub fn download_tags(&mut self, opt: AutotagOption) -> &mut Self {
++        self.download_tags = opt;
++        self
++    }
++}
++
++impl<'cb> Binding for FetchOptions<'cb> {
++    type Raw = raw::git_fetch_options;
++
++    unsafe fn from_raw(_raw: raw::git_fetch_options) -> FetchOptions<'cb> {
++        panic!("unimplemented");
++    }
++    fn raw(&self) -> raw::git_fetch_options {
++        raw::git_fetch_options {
++            version: 1,
++            callbacks: self.callbacks.as_ref().map(|m| m.raw())
++                           .unwrap_or_else(|| RemoteCallbacks::new().raw()),
++            proxy_opts: self.proxy.as_ref().map(|m| m.raw())
++                            .unwrap_or_else(|| ProxyOptions::new().raw()),
++            prune: ::call::convert(&self.prune),
++            update_fetchhead: ::call::convert(&self.update_fetchhead),
++            download_tags: ::call::convert(&self.download_tags),
++            // TODO: expose this as a builder option
++            custom_headers: raw::git_strarray {
++                count: 0,
++                strings: ptr::null_mut(),
++            },
++        }
++    }
++}
++
++impl<'cb> Default for PushOptions<'cb> {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl<'cb> PushOptions<'cb> {
++    /// Creates a new blank set of push options.
++    pub fn new() -> PushOptions<'cb> {
++        PushOptions {
++            callbacks: None,
++            proxy: None,
++            pb_parallelism: 1,
++        }
++    }
++
++    /// Set the callbacks to use for the push operation.
++    pub fn remote_callbacks(&mut self, cbs: RemoteCallbacks<'cb>) -> &mut Self {
++        self.callbacks = Some(cbs);
++        self
++    }
++
++    /// Set the proxy options to use for the push operation.
++    pub fn proxy_options(&mut self, opts: ProxyOptions<'cb>) -> &mut Self {
++        self.proxy = Some(opts);
++        self
++    }
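The builders above chain; a sketch of a fetch configured to prune deleted branches, skip FETCH_HEAD bookkeeping, and not auto-follow tags (the remote name is hypothetical):

    extern crate git2;

    fn configured_fetch(repo: &git2::Repository) -> Result<(), git2::Error> {
        let mut remote = repo.find_remote("origin")?;
        let mut opts = git2::FetchOptions::new();
        opts.prune(git2::FetchPrune::On)
            .update_fetchhead(false)
            .download_tags(git2::AutotagOption::None);
        remote.fetch(&["master"], Some(&mut opts), None)
    }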
++
++    /// If the transport being used to push to the remote requires the creation
++    /// of a pack file, this controls the number of worker threads used by the
++    /// packbuilder when creating that pack file to be sent to the remote.
++    ///
++    /// If set to 0, the packbuilder will auto-detect the number of threads to
++    /// create; the default value is 1.
++    pub fn packbuilder_parallelism(&mut self, parallel: u32) -> &mut Self {
++        self.pb_parallelism = parallel;
++        self
++    }
++}
++
++impl<'cb> Binding for PushOptions<'cb> {
++    type Raw = raw::git_push_options;
++
++    unsafe fn from_raw(_raw: raw::git_push_options) -> PushOptions<'cb> {
++        panic!("unimplemented");
++    }
++    fn raw(&self) -> raw::git_push_options {
++        raw::git_push_options {
++            version: 1,
++            callbacks: self.callbacks.as_ref()
++                           .map(|m| m.raw())
++                           .unwrap_or_else(|| RemoteCallbacks::new().raw()),
++            proxy_opts: self.proxy.as_ref().map(|m| m.raw())
++                            .unwrap_or_else(|| ProxyOptions::new().raw()),
++            pb_parallelism: self.pb_parallelism as libc::c_uint,
++            // TODO: expose this as a builder option
++            custom_headers: raw::git_strarray {
++                count: 0,
++                strings: ptr::null_mut(),
++            },
++        }
++    }
++}
++
++impl<'repo, 'connection, 'cb> RemoteConnection<'repo, 'connection, 'cb> {
++    /// Check whether the remote is (still) connected.
++    pub fn connected(&mut self) -> bool {
++        self.remote.connected()
++    }
++
++    /// Get the remote repository's reference advertisement list.
++    ///
++    /// This list is available as soon as the connection to
++    /// the remote is initiated and it remains available after disconnecting.
++    pub fn list(&self) -> Result<&[RemoteHead], Error> {
++        self.remote.list()
++    }
++}
++
++impl<'repo, 'connection, 'cb> Drop for RemoteConnection<'repo, 'connection, 'cb> {
++    fn drop(&mut self) {
++        self.remote.disconnect()
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use std::cell::Cell;
++    use tempdir::TempDir;
++    use {Repository, Remote, RemoteCallbacks, Direction, FetchOptions};
++    use {AutotagOption, PushOptions};
++
++    #[test]
++    fn smoke() {
++        let (td, repo) = ::test::repo_init();
++        t!(repo.remote("origin", "/path/to/nowhere"));
++        drop(repo);
++
++        let repo = t!(Repository::init(td.path()));
++        let origin = t!(repo.find_remote("origin"));
++        assert_eq!(origin.name(), Some("origin"));
++        assert_eq!(origin.url(), Some("/path/to/nowhere"));
++        assert_eq!(origin.pushurl(), None);
++
++        t!(repo.remote_set_url("origin", "/path/to/elsewhere"));
++        t!(repo.remote_set_pushurl("origin", Some("/path/to/elsewhere")));
++
++        let stats = origin.stats();
++        assert_eq!(stats.total_objects(), 0);
++    }
++
++    #[test]
++    fn create_remote() {
++        let td = TempDir::new("test").unwrap();
++        let remote = td.path().join("remote");
++        Repository::init_bare(&remote).unwrap();
++
++        let (_td, repo) = ::test::repo_init();
++        let url = if cfg!(unix) {
++            format!("file://{}", remote.display())
++        } else {
++            format!("file:///{}", remote.display().to_string()
++                                        .replace("\\", "/"))
++        };
++
++        let mut origin = repo.remote("origin", &url).unwrap();
++        assert_eq!(origin.name(), Some("origin"));
++        assert_eq!(origin.url(), Some(&url[..]));
++        assert_eq!(origin.pushurl(), None);
++
++        {
++            let mut specs = origin.refspecs();
++            let spec = specs.next().unwrap();
++            assert!(specs.next().is_none());
++            assert_eq!(spec.str(), Some("+refs/heads/*:refs/remotes/origin/*"));
++            assert_eq!(spec.dst(), Some("refs/remotes/origin/*"));
++            assert_eq!(spec.src(), Some("refs/heads/*"));
++            assert!(spec.is_force());
++        }
++        assert!(origin.refspecs().next_back().is_some());
++        {
++            let remotes = repo.remotes().unwrap();
++            assert_eq!(remotes.len(), 1);
++            assert_eq!(remotes.get(0), Some("origin"));
++            assert_eq!(remotes.iter().count(), 1);
++            assert_eq!(remotes.iter().next().unwrap(), Some("origin"));
++        }
++
++        origin.connect(Direction::Push).unwrap();
++        assert!(origin.connected());
++        origin.disconnect();
++
++        origin.connect(Direction::Fetch).unwrap();
++        assert!(origin.connected());
++        origin.download(&[], None).unwrap();
++        origin.disconnect();
++
++        {
++            let mut connection = origin.connect_auth(Direction::Push, None, None).unwrap();
++            assert!(connection.connected());
++        }
++        assert!(!origin.connected());
++
++        {
++            let mut connection = origin.connect_auth(Direction::Fetch, None, None).unwrap();
++            assert!(connection.connected());
++        }
++        assert!(!origin.connected());
++
++        origin.fetch(&[], None, None).unwrap();
++        origin.fetch(&[], None, Some("foo")).unwrap();
++        origin.update_tips(None, true, AutotagOption::Unspecified, None).unwrap();
++        origin.update_tips(None, true, AutotagOption::All, Some("foo")).unwrap();
++
++        t!(repo.remote_add_fetch("origin", "foo"));
++        t!(repo.remote_add_fetch("origin", "bar"));
++    }
++
++    #[test]
++    fn rename_remote() {
++        let (_td, repo) = ::test::repo_init();
++        repo.remote("origin", "foo").unwrap();
++        drop(repo.remote_rename("origin", "foo"));
++        drop(repo.remote_delete("foo"));
++    }
++
++    #[test]
++    fn create_remote_anonymous() {
++        let td = TempDir::new("test").unwrap();
++        let repo = Repository::init(td.path()).unwrap();
++
++        let origin = repo.remote_anonymous("/path/to/nowhere").unwrap();
++        assert_eq!(origin.name(), None);
++        drop(origin.clone());
++    }
++
++    #[test]
++    fn is_valid() {
++        assert!(Remote::is_valid_name("foobar"));
++        assert!(!Remote::is_valid_name("\x01"));
++    }
++
++    #[test]
++    fn transfer_cb() {
++        let (td, _repo) = ::test::repo_init();
++        let td2 = TempDir::new("git").unwrap();
++        let url = ::test::path2url(&td.path());
++
++        let repo = Repository::init(td2.path()).unwrap();
++        let progress_hit = Cell::new(false);
++        {
++            let mut callbacks = RemoteCallbacks::new();
++            let mut origin = repo.remote("origin", &url).unwrap();
++
++            callbacks.transfer_progress(|_progress| {
++                progress_hit.set(true);
++                true
++            });
++            origin.fetch(&[],
++                         Some(FetchOptions::new().remote_callbacks(callbacks)),
++                         None).unwrap();
++
++            let list = t!(origin.list());
++            assert_eq!(list.len(), 2);
++            assert_eq!(list[0].name(), "HEAD");
++            assert!(!list[0].is_local());
++            assert_eq!(list[1].name(), "refs/heads/master");
++            assert!(!list[1].is_local());
++        }
++        assert!(progress_hit.get());
++    }
++
++    /// This test is meant to assure that the callbacks provided to connect
++    /// will not cause segfaults.
++    #[test]
++    fn connect_list() {
++        let (td, _repo) = ::test::repo_init();
++        let td2 = TempDir::new("git").unwrap();
++        let url = ::test::path2url(&td.path());
++
++        let repo = Repository::init(td2.path()).unwrap();
++        let mut callbacks = RemoteCallbacks::new();
++        callbacks.sideband_progress(|_progress| {
++            // no-op
++            true
++        });
++
++        let mut origin = repo.remote("origin", &url).unwrap();
++
++        {
++            let mut connection = origin.connect_auth(Direction::Fetch, Some(callbacks), None).unwrap();
++            assert!(connection.connected());
++
++            let list = t!(connection.list());
++            assert_eq!(list.len(), 2);
++            assert_eq!(list[0].name(), "HEAD");
++            assert!(!list[0].is_local());
++            assert_eq!(list[1].name(), "refs/heads/master");
++            assert!(!list[1].is_local());
++        }
++        assert!(!origin.connected());
++    }
++
++    #[test]
++    fn push() {
++        let (_td, repo) = ::test::repo_init();
++        let td2 = TempDir::new("git1").unwrap();
++        let td3 = TempDir::new("git2").unwrap();
++        let url = ::test::path2url(&td2.path());
++
++        Repository::init_bare(td2.path()).unwrap();
++        // git push
++        let mut remote = repo.remote("origin", &url).unwrap();
++        let mut updated = false;
++        {
++            let mut callbacks = RemoteCallbacks::new();
++            callbacks.push_update_reference(|refname, status| {
++                updated = true;
++                assert_eq!(refname, "refs/heads/master");
++                assert_eq!(status, None);
++                Ok(())
++            });
++            let mut options = PushOptions::new();
++            options.remote_callbacks(callbacks);
++            remote.push(&["refs/heads/master"], Some(&mut options)).unwrap();
++        }
++        assert!(updated);
++
++        let repo = Repository::clone(&url, td3.path()).unwrap();
++        let commit = repo.head().unwrap().target().unwrap();
++        let commit = repo.find_commit(commit).unwrap();
++        assert_eq!(commit.message(), Some("initial"));
++    }
++}
diff --cc vendor/git2-0.7.5/src/remote_callbacks.rs
index 000000000,000000000..fdeaa83f6
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/remote_callbacks.rs
@@@ -1,0 -1,0 +1,391 @@@
++use std::ffi::{CStr, CString};
++use std::marker;
++use std::mem;
++use std::slice;
++use std::ptr;
++use std::str;
++use libc::{c_void, c_int, c_char, c_uint};
++
++use {raw, panic, Error, Cred, CredentialType, Oid};
++use cert::Cert;
++use util::Binding;
++
++/// A structure to contain the callbacks which are invoked when a repository is
++/// being updated or downloaded.
++///
++/// These callbacks are used to manage facilities such as authentication,
++/// transfer progress, etc.
++pub struct RemoteCallbacks<'a> {
++    progress: Option<Box<TransferProgress<'a>>>,
++    credentials: Option<Box<Credentials<'a>>>,
++    sideband_progress: Option<Box<TransportMessage<'a>>>,
++    update_tips: Option<Box<UpdateTips<'a>>>,
++    certificate_check: Option<Box<CertificateCheck<'a>>>,
++    push_update_reference: Option<Box<PushUpdateReference<'a>>>,
++}
++
++/// Struct representing the progress by an in-flight transfer.
++pub struct Progress<'a> {
++    raw: ProgressState,
++    _marker: marker::PhantomData<&'a raw::git_transfer_progress>,
++}
++
++enum ProgressState {
++    Borrowed(*const raw::git_transfer_progress),
++    Owned(raw::git_transfer_progress),
++}
++
++/// Callback used to acquire credentials for when a remote is fetched.
++///
++/// * `url` - the resource for which the credentials are required.
++/// * `username_from_url` - the username that was embedded in the url, or `None`
++///   if it was not included.
++/// * `allowed_types` - a bitmask stating which cred types are ok to return.
++pub type Credentials<'a> = FnMut(&str, Option<&str>, CredentialType)
++                                 -> Result<Cred, Error> + 'a;
++
++/// Callback to be invoked while a transfer is in progress.
++///
++/// This callback will be periodically called with updates to the progress of
++/// the transfer so far. The return value indicates whether the transfer should
++/// continue. A return value of `false` will cancel the transfer.
++///
++/// * `progress` - the progress being made so far.
++pub type TransferProgress<'a> = FnMut(Progress) -> bool + 'a;
++
++/// Callback for receiving messages delivered by the transport.
++///
++/// The return value indicates whether the network operation should continue.
++pub type TransportMessage<'a> = FnMut(&[u8]) -> bool + 'a;
++
++/// Callback for whenever a reference is updated locally.
++pub type UpdateTips<'a> = FnMut(&str, Oid, Oid) -> bool + 'a;
++
++/// Callback for a custom certificate check.
++///
++/// The first argument is the certificate received on the connection.
++/// Certificates are typically either an SSH or X509 certificate.
++///
++/// The second argument is the hostname for the connection.
++pub type CertificateCheck<'a> = FnMut(&Cert, &str) -> bool + 'a;
++
++/// Callback for each updated reference on push.
++///
++/// The first argument here is the `refname` of the reference, and the second is
++/// the status message sent by a server. If the status is `Some` then the update
++/// was rejected by the remote server with a reason why.
++pub type PushUpdateReference<'a> = FnMut(&str, Option<&str>) -> Result<(), Error> + 'a;
++
++impl<'a> Default for RemoteCallbacks<'a> {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl<'a> RemoteCallbacks<'a> {
++    /// Creates a new set of empty callbacks.
++    pub fn new() -> RemoteCallbacks<'a> {
++        RemoteCallbacks {
++            credentials: None,
++            progress: None,
++            sideband_progress: None,
++            update_tips: None,
++            certificate_check: None,
++            push_update_reference: None,
++        }
++    }
++
++    /// The callback through which to fetch credentials if required.
++    pub fn credentials<F>(&mut self, cb: F) -> &mut RemoteCallbacks<'a>
++        where F: FnMut(&str, Option<&str>, CredentialType)
++                       -> Result<Cred, Error> + 'a
++    {
++        self.credentials = Some(Box::new(cb) as Box<Credentials<'a>>);
++        self
++    }
++
++    /// The callback through which progress is monitored.
++    pub fn transfer_progress<F>(&mut self, cb: F) -> &mut RemoteCallbacks<'a>
++        where F: FnMut(Progress) -> bool + 'a {
++        self.progress = Some(Box::new(cb) as Box<TransferProgress<'a>>);
++        self
++    }
++
++    /// Textual progress from the remote.
++    ///
++    /// Text sent over the progress side-band will be passed to this function
++    /// (this is the 'counting objects' output).
++    pub fn sideband_progress<F>(&mut self, cb: F) -> &mut RemoteCallbacks<'a>
++        where F: FnMut(&[u8]) -> bool + 'a {
++        self.sideband_progress = Some(Box::new(cb) as Box<TransportMessage<'a>>);
++        self
++    }
++
++    /// Each time a reference is updated locally, the callback will be called
++    /// with information about it.
++    pub fn update_tips<F>(&mut self, cb: F) -> &mut RemoteCallbacks<'a>
++        where F: FnMut(&str, Oid, Oid) -> bool + 'a {
++        self.update_tips = Some(Box::new(cb) as Box<UpdateTips<'a>>);
++        self
++    }
++
++    /// If certificate verification fails, then this callback will be invoked to
++    /// let the caller make the final decision of whether to allow the
++    /// connection to proceed.
++    pub fn certificate_check<F>(&mut self, cb: F) -> &mut RemoteCallbacks<'a>
++        where F: FnMut(&Cert, &str) -> bool + 'a
++    {
++        self.certificate_check = Some(Box::new(cb) as Box<CertificateCheck<'a>>);
++        self
++    }
++
++    /// Set a callback to get invoked for each updated reference on a push.
++    ///
++    /// The first argument to the callback is the name of the reference and the
++    /// second is a status message sent by the server. If the status is `Some`
++    /// then the push was rejected.
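A sketch combining the two progress channels registered above; writing side-band text to stderr is an arbitrary choice for illustration, and the remote name is hypothetical:

    extern crate git2;

    use std::io::{self, Write};

    fn fetch_with_progress(repo: &git2::Repository) -> Result<(), git2::Error> {
        let mut remote = repo.find_remote("origin")?;
        let mut callbacks = git2::RemoteCallbacks::new();
        callbacks.sideband_progress(|data| {
            // Raw text from the server ("Counting objects...", etc.).
            io::stderr().write_all(data).is_ok()
        });
        callbacks.transfer_progress(|progress| {
            eprintln!("received {}/{} objects",
                      progress.received_objects(),
                      progress.total_objects());
            true // returning false would cancel the transfer
        });
        let mut opts = git2::FetchOptions::new();
        opts.remote_callbacks(callbacks);
        remote.fetch(&["master"], Some(&mut opts), None)
    }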
++    pub fn push_update_reference<F>(&mut self, cb: F) -> &mut RemoteCallbacks<'a>
++        where F: FnMut(&str, Option<&str>) -> Result<(), Error> + 'a,
++    {
++        self.push_update_reference = Some(Box::new(cb) as Box<PushUpdateReference<'a>>);
++        self
++    }
++}
++
++impl<'a> Binding for RemoteCallbacks<'a> {
++    type Raw = raw::git_remote_callbacks;
++    unsafe fn from_raw(_raw: raw::git_remote_callbacks) -> RemoteCallbacks<'a> {
++        panic!("unimplemented");
++    }
++
++    fn raw(&self) -> raw::git_remote_callbacks {
++        unsafe {
++            let mut callbacks: raw::git_remote_callbacks = mem::zeroed();
++            assert_eq!(raw::git_remote_init_callbacks(&mut callbacks,
++                                    raw::GIT_REMOTE_CALLBACKS_VERSION), 0);
++            if self.progress.is_some() {
++                let f: raw::git_transfer_progress_cb = transfer_progress_cb;
++                callbacks.transfer_progress = Some(f);
++            }
++            if self.credentials.is_some() {
++                let f: raw::git_cred_acquire_cb = credentials_cb;
++                callbacks.credentials = Some(f);
++            }
++            if self.sideband_progress.is_some() {
++                let f: raw::git_transport_message_cb = sideband_progress_cb;
++                callbacks.sideband_progress = Some(f);
++            }
++            if self.certificate_check.is_some() {
++                let f: raw::git_transport_certificate_check_cb =
++                        certificate_check_cb;
++                callbacks.certificate_check = Some(f);
++            }
++            if self.push_update_reference.is_some() {
++                let f: extern fn(_, _, _) -> c_int = push_update_reference_cb;
++                callbacks.push_update_reference = Some(f);
++            }
++            if self.update_tips.is_some() {
++                let f: extern fn(*const c_char, *const raw::git_oid,
++                                 *const raw::git_oid, *mut c_void) -> c_int
++                        = update_tips_cb;
++                callbacks.update_tips = Some(f);
++            }
++            callbacks.payload = self as *const _ as *mut _;
++            callbacks
++        }
++    }
++}
++
++impl<'a> Progress<'a> {
++    /// Number of objects in the packfile being downloaded.
++    pub fn total_objects(&self) -> usize {
++        unsafe { (*self.raw()).total_objects as usize }
++    }
++    /// Received objects that have been hashed.
++    pub fn indexed_objects(&self) -> usize {
++        unsafe { (*self.raw()).indexed_objects as usize }
++    }
++    /// Objects which have been downloaded.
++    pub fn received_objects(&self) -> usize {
++        unsafe { (*self.raw()).received_objects as usize }
++    }
++    /// Locally-available objects that have been injected in order to fix a thin
++    /// pack.
++    pub fn local_objects(&self) -> usize {
++        unsafe { (*self.raw()).local_objects as usize }
++    }
++    /// Number of deltas in the packfile being downloaded.
++    pub fn total_deltas(&self) -> usize {
++        unsafe { (*self.raw()).total_deltas as usize }
++    }
++    /// Received deltas that have been hashed.
++    pub fn indexed_deltas(&self) -> usize {
++        unsafe { (*self.raw()).indexed_deltas as usize }
++    }
++    /// Size of the packfile received up to now.
++    pub fn received_bytes(&self) -> usize {
++        unsafe { (*self.raw()).received_bytes as usize }
++    }
++
++    /// Convert this to an owned version of `Progress`.
++    pub fn to_owned(&self) -> Progress<'static> {
++        Progress {
++            raw: ProgressState::Owned(unsafe { *self.raw() }),
++            _marker: marker::PhantomData,
++        }
++    }
++}
++
++impl<'a> Binding for Progress<'a> {
++    type Raw = *const raw::git_transfer_progress;
++    unsafe fn from_raw(raw: *const raw::git_transfer_progress)
++                       -> Progress<'a> {
++        Progress {
++            raw: ProgressState::Borrowed(raw),
++            _marker: marker::PhantomData,
++        }
++    }
++
++    fn raw(&self) -> *const raw::git_transfer_progress {
++        match self.raw {
++            ProgressState::Borrowed(raw) => raw,
++            ProgressState::Owned(ref raw) => raw as *const _,
++        }
++    }
++}
++
++extern fn credentials_cb(ret: *mut *mut raw::git_cred,
++                         url: *const c_char,
++                         username_from_url: *const c_char,
++                         allowed_types: c_uint,
++                         payload: *mut c_void) -> c_int {
++    unsafe {
++        let ok = panic::wrap(|| {
++            let payload = &mut *(payload as *mut RemoteCallbacks);
++            let callback = try!(payload.credentials.as_mut()
++                                       .ok_or(raw::GIT_PASSTHROUGH as c_int));
++            *ret = ptr::null_mut();
++            let url = try!(str::from_utf8(CStr::from_ptr(url).to_bytes())
++                               .map_err(|_| raw::GIT_PASSTHROUGH as c_int));
++            let username_from_url = match ::opt_bytes(&url, username_from_url) {
++                Some(username) => {
++                    Some(try!(str::from_utf8(username)
++                                  .map_err(|_| raw::GIT_PASSTHROUGH as c_int)))
++                }
++                None => None,
++            };
++
++            let cred_type = CredentialType::from_bits_truncate(allowed_types as u32);
++
++            callback(url, username_from_url, cred_type).map_err(|e| {
++                let s = CString::new(e.to_string()).unwrap();
++                raw::giterr_set_str(e.raw_code() as c_int, s.as_ptr());
++                e.raw_code() as c_int
++            })
++        });
++        match ok {
++            Some(Ok(cred)) => {
++                // Turns out it's a memory safety issue if we pass through any
++                // and all credentials into libgit2
++                if allowed_types & (cred.credtype() as c_uint) != 0 {
++                    *ret = cred.unwrap();
++                    0
++                } else {
++                    raw::GIT_PASSTHROUGH as c_int
++                }
++            }
++            Some(Err(e)) => e,
++            None => -1,
++        }
++    }
++}
++
++extern fn transfer_progress_cb(stats: *const raw::git_transfer_progress,
++                               payload: *mut c_void) -> c_int {
++    let ok = panic::wrap(|| unsafe {
++        let payload = &mut *(payload as *mut RemoteCallbacks);
++        let callback = match payload.progress {
++            Some(ref mut c) => c,
++            None => return true,
++        };
++        let progress = Binding::from_raw(stats);
++        callback(progress)
++    });
++    if ok == Some(true) {0} else {-1}
++}
++
++extern fn sideband_progress_cb(str: *const c_char,
++                               len: c_int,
++                               payload: *mut c_void) -> c_int {
++    let ok = panic::wrap(|| unsafe {
++        let payload = &mut *(payload as *mut RemoteCallbacks);
++        let callback = match payload.sideband_progress {
++            Some(ref mut c) => c,
++            None => return true,
++        };
++        let buf = slice::from_raw_parts(str as *const u8, len as usize);
++        callback(buf)
++    });
++    if ok == Some(true) {0} else {-1}
++}
++
++extern fn update_tips_cb(refname: *const c_char,
++                         a: *const raw::git_oid,
++                         b: *const raw::git_oid,
++                         data: *mut c_void) -> c_int {
++    let ok = panic::wrap(|| unsafe {
++        let payload = &mut *(data as *mut RemoteCallbacks);
++        let callback = match payload.update_tips {
++            Some(ref mut c) => c,
++            None => return true,
++        };
++        let refname = str::from_utf8(CStr::from_ptr(refname).to_bytes())
++                          .unwrap();
++        let a = Binding::from_raw(a);
++        let b = Binding::from_raw(b);
++        callback(refname, a, b)
++    });
++    if ok == Some(true) {0} else {-1}
++}
++
++extern fn certificate_check_cb(cert: *mut raw::git_cert,
++                               _valid: c_int,
++                               hostname: *const c_char,
++                               data: *mut c_void) -> c_int {
++    let ok = panic::wrap(|| unsafe {
++        let payload = &mut *(data as *mut RemoteCallbacks);
++        let callback = match payload.certificate_check {
++            Some(ref mut c) => c,
++            None => return true,
++        };
++        let cert = Binding::from_raw(cert);
++        let hostname = str::from_utf8(CStr::from_ptr(hostname).to_bytes())
++                           .unwrap();
++        callback(&cert, hostname)
++    });
++    if ok == Some(true) {0} else {-1}
++}
++
++extern fn push_update_reference_cb(refname: *const c_char,
++                                   status: *const c_char,
++                                   data: *mut c_void) -> c_int {
++    panic::wrap(|| unsafe {
++        let payload = &mut *(data as *mut RemoteCallbacks);
++        let callback = match payload.push_update_reference {
++            Some(ref mut c) => c,
++            None => return 0,
++        };
++        let refname = str::from_utf8(CStr::from_ptr(refname).to_bytes())
++                          .unwrap();
++        let status = if status.is_null() {
++            None
++        } else {
++            Some(str::from_utf8(CStr::from_ptr(status).to_bytes()).unwrap())
++        };
++        match callback(refname, status) {
++            Ok(()) => 0,
++            Err(e) => e.raw_code(),
++        }
++    }).unwrap_or(-1)
++}
diff --cc vendor/git2-0.7.5/src/repo.rs
index 000000000,000000000..8b8edf48a
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/repo.rs
@@@ -1,0 -1,0 +1,2562 @@@
++use std::env;
++use std::ffi::{CStr, CString, OsStr};
++use std::iter::IntoIterator;
++use std::mem;
++use std::path::Path;
++use std::ptr;
++use std::str;
++use libc::{c_int, c_char, size_t, c_void, c_uint};
++
++use {raw, Revspec, Error, init, Object, RepositoryOpenFlags, RepositoryState, Remote, Buf, StashFlags};
++use {ResetType, Signature, Reference, References, Submodule, Blame, BlameOptions};
++use {Branches, BranchType, Index, Config, Oid, Blob, BlobWriter, Branch, Commit, Tree};
++use {AnnotatedCommit, MergeOptions, SubmoduleIgnore, SubmoduleStatus, MergeAnalysis, MergePreference};
++use {ObjectType, Tag, Note, Notes, StatusOptions, Statuses, Status, Revwalk};
++use {RevparseMode, RepositoryInitMode, Reflog, IntoCString, Describe};
++use {DescribeOptions, TreeBuilder, Diff, DiffOptions, PackBuilder, Odb};
++use build::{RepoBuilder, CheckoutBuilder};
++use stash::{StashApplyOptions, StashCbData, stash_cb};
++use string_array::StringArray;
++use oid_array::OidArray;
++use util::{self, Binding};
++
++/// An owned git repository, representing all state associated with the
++/// underlying filesystem.
++///
++/// This structure corresponds to a `git_repository` in libgit2. Many other
++/// types in git2-rs are derivative from this structure and are attached to its
++/// lifetime.
++///
++/// When a repository goes out of scope it is freed in memory but not deleted
++/// from the filesystem.
++pub struct Repository {
++    raw: *mut raw::git_repository,
++}
++
++// It is the current belief that a `Repository` can be sent among threads, or
++// even shared among threads in a mutex.
++unsafe impl Send for Repository {}
++
++/// Options which can be used to configure how a repository is initialized.
++pub struct RepositoryInitOptions {
++    flags: u32,
++    mode: u32,
++    workdir_path: Option<CString>,
++    description: Option<CString>,
++    template_path: Option<CString>,
++    initial_head: Option<CString>,
++    origin_url: Option<CString>,
++}
++
++impl Repository {
++    /// Attempt to open an already-existing repository at `path`.
++    ///
++    /// The path can point to either a normal or bare repository.
++    pub fn open<P: AsRef<Path>>(path: P) -> Result<Repository, Error> {
++        init();
++        let path = try!(path.as_ref().into_c_string());
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_repository_open(&mut ret, path));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Attempt to open an already-existing bare repository at `path`.
++    ///
++    /// The path can point to only a bare repository.
++    pub fn open_bare<P: AsRef<Path>>(path: P) -> Result<Repository, Error> {
++        init();
++        let path = try!(path.as_ref().into_c_string());
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_repository_open_bare(&mut ret, path));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Find and open an existing repository, respecting git environment
++    /// variables. This acts like `open_ext` with the
++    /// `REPOSITORY_OPEN_FROM_ENV` flag, but additionally respects `$GIT_DIR`.
++    /// With `$GIT_DIR` unset, this will search for a repository starting in
++    /// the current directory.
++    pub fn open_from_env() -> Result<Repository, Error> {
++        init();
++        let mut ret = ptr::null_mut();
++        let flags = raw::GIT_REPOSITORY_OPEN_FROM_ENV;
++        unsafe {
++            try_call!(raw::git_repository_open_ext(&mut ret,
++                                                   ptr::null(),
++                                                   flags as c_uint,
++                                                   ptr::null()));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Find and open an existing repository, with additional options.
++    ///
++    /// If flags contains REPOSITORY_OPEN_NO_SEARCH, the path must point
++    /// directly to a repository; otherwise, this may point to a subdirectory
++    /// of a repository, and `open_ext` will search up through parent
++    /// directories.
++    ///
++    /// If flags contains REPOSITORY_OPEN_CROSS_FS, the search through parent
++    /// directories may cross a filesystem boundary; by default the search
++    /// stops at one (detected when the stat st_dev field changes).
++    ///
++    /// If flags contains REPOSITORY_OPEN_BARE, force opening the repository as
++    /// bare even if it isn't, ignoring any working directory, and defer
++    /// loading the repository configuration for performance.
++    ///
++    /// If flags contains REPOSITORY_OPEN_NO_DOTGIT, don't try appending
++    /// `/.git` to `path`.
++    ///
++    /// If flags contains REPOSITORY_OPEN_FROM_ENV, `open_ext` will ignore
++    /// other flags and `ceiling_dirs`, and respect the same environment
++    /// variables git does. Note, however, that `path` overrides `$GIT_DIR`; to
++    /// respect `$GIT_DIR` as well, use `open_from_env`.
++    ///
++    /// `ceiling_dirs` specifies a list of paths that the search through parent
++    /// directories will stop before entering. Use the functions in std::env
++    /// to construct or manipulate such a path list.
++    pub fn open_ext<P, O, I>(path: P,
++                             flags: RepositoryOpenFlags,
++                             ceiling_dirs: I)
++                             -> Result<Repository, Error>
++        where P: AsRef<Path>, O: AsRef<OsStr>, I: IntoIterator<Item = O>
++    {
++        init();
++        let path = try!(path.as_ref().into_c_string());
++        let ceiling_dirs_os = try!(env::join_paths(ceiling_dirs));
++        let ceiling_dirs = try!(ceiling_dirs_os.into_c_string());
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_repository_open_ext(&mut ret,
++                                                   path,
++                                                   flags.bits() as c_uint,
++                                                   ceiling_dirs));
++            Ok(Binding::from_raw(ret))
++        }
++    }
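A sketch of `open_ext`, assuming a `RepositoryOpenFlags::CROSS_FS` bitflags constant is exported by this version; the paths are hypothetical:

    extern crate git2;

    use git2::{Repository, RepositoryOpenFlags};

    fn open_from_subdir() -> Result<Repository, git2::Error> {
        Repository::open_ext(
            "/path/to/repo/src",           // may be a subdirectory of the repo
            RepositoryOpenFlags::CROSS_FS, // allow the search to cross filesystems
            vec!["/path/to"],              // but never search above this directory
        )
    }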
++
++    /// Attempt to open an already-existing repository at or above `path`.
++    ///
++    /// This starts at `path` and looks up the filesystem hierarchy
++    /// until it finds a repository.
++    pub fn discover<P: AsRef<Path>>(path: P) -> Result<Repository, Error> {
++        // TODO: this diverges significantly from the libgit2 API
++        init();
++        let buf = Buf::new();
++        let path = try!(path.as_ref().into_c_string());
++        unsafe {
++            try_call!(raw::git_repository_discover(buf.raw(), path, 1,
++                                                   ptr::null()));
++        }
++        Repository::open(util::bytes2path(&*buf))
++    }
++
++    /// Creates a new repository in the specified folder.
++    ///
++    /// This by default will create any necessary directories to create the
++    /// repository, and it will read any user-specified templates when creating
++    /// the repository. This behavior can be configured through `init_opts`.
++    pub fn init<P: AsRef<Path>>(path: P) -> Result<Repository, Error> {
++        Repository::init_opts(path, &RepositoryInitOptions::new())
++    }
++
++    /// Creates a new `--bare` repository in the specified folder.
++    ///
++    /// The folder must exist prior to invoking this function.
++    pub fn init_bare<P: AsRef<Path>>(path: P) -> Result<Repository, Error> {
++        Repository::init_opts(path, RepositoryInitOptions::new().bare(true))
++    }
++
++    /// Creates a new repository in the specified folder, with the given
++    /// options controlling how it is initialized.
++    pub fn init_opts<P: AsRef<Path>>(path: P, opts: &RepositoryInitOptions)
++                                     -> Result<Repository, Error> {
++        init();
++        let path = try!(path.as_ref().into_c_string());
++        let mut ret = ptr::null_mut();
++        unsafe {
++            let mut opts = opts.raw();
++            try_call!(raw::git_repository_init_ext(&mut ret, path, &mut opts));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Clone a remote repository.
++    ///
++    /// See the `RepoBuilder` struct for more information. This function will
++    /// delegate to a fresh `RepoBuilder`.
++    pub fn clone<P: AsRef<Path>>(url: &str, into: P)
++                                 -> Result<Repository, Error> {
++        ::init();
++        RepoBuilder::new().clone(url, into.as_ref())
++    }
++
++    /// Clone a remote repository, initialize and update its submodules
++    /// recursively.
++    ///
++    /// This is similar to `git clone --recursive`.
++    pub fn clone_recurse<P: AsRef<Path>>(url: &str, into: P)
++                                         -> Result<Repository, Error> {
++        let repo = Repository::clone(url, into)?;
++        repo.update_submodules()?;
++        Ok(repo)
++    }
++
++    /// Attempt to wrap an object database as a repository.
++    pub fn from_odb(odb: Odb) -> Result<Repository, Error> {
++        init();
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_repository_wrap_odb(&mut ret, odb.raw()));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Update submodules recursively.
++    ///
++    /// Uninitialized submodules will be initialized.
++    fn update_submodules(&self) -> Result<(), Error> {
++        fn add_subrepos(repo: &Repository, list: &mut Vec<Repository>)
++                        -> Result<(), Error> {
++            for mut subm in repo.submodules()? {
++                subm.update(true, None)?;
++                list.push(subm.open()?);
++            }
++            Ok(())
++        }
++
++        let mut repos = Vec::new();
++        add_subrepos(self, &mut repos)?;
++        while let Some(repo) = repos.pop() {
++            add_subrepos(&repo, &mut repos)?;
++        }
++        Ok(())
++    }
++
++    /// Execute a rev-parse operation against the `spec` listed.
++    ///
++    /// The resulting revision specification is returned, or an error is
++    /// returned if one occurs.
++    pub fn revparse(&self, spec: &str) -> Result<Revspec, Error> {
++        let mut raw = raw::git_revspec {
++            from: ptr::null_mut(),
++            to: ptr::null_mut(),
++            flags: 0,
++        };
++        let spec = try!(CString::new(spec));
++        unsafe {
++            try_call!(raw::git_revparse(&mut raw, self.raw, spec));
++            let to = Binding::from_raw_opt(raw.to);
++            let from = Binding::from_raw_opt(raw.from);
++            let mode = RevparseMode::from_bits_truncate(raw.flags as u32);
++            Ok(Revspec::from_objects(from, to, mode))
++        }
++    }
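A sketch of the two rev-parse entry points; the revision strings are hypothetical:

    extern crate git2;

    fn show_revs(repo: &git2::Repository) -> Result<(), git2::Error> {
        // Single-object form: resolves the spec to the object it names.
        let obj = repo.revparse_single("HEAD~3")?;
        println!("HEAD~3 -> {}", obj.id());

        // Full form: a range spec yields both endpoints.
        let spec = repo.revparse("master..feature")?;
        if let (Some(from), Some(to)) = (spec.from(), spec.to()) {
            println!("range {}..{}", from.id(), to.id());
        }
        Ok(())
    }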
++
++    /// Find a single object, as specified by a revision string.
++    pub fn revparse_single(&self, spec: &str) -> Result<Object, Error> {
++        let spec = try!(CString::new(spec));
++        let mut obj = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_revparse_single(&mut obj, self.raw, spec));
++            assert!(!obj.is_null());
++            Ok(Binding::from_raw(obj))
++        }
++    }
++
++    /// Find a single object and intermediate reference by a revision string.
++    ///
++    /// See `man gitrevisions`, or
++    /// http://git-scm.com/docs/git-rev-parse.html#_specifying_revisions for
++    /// information on the syntax accepted.
++    ///
++    /// In some cases (e.g. `@{<-n>}` or `<branchname>@{upstream}`), the
++    /// expression may point to an intermediate reference. When such
++    /// expressions are being passed in, this intermediate reference is
++    /// returned.
++    pub fn revparse_ext(&self, spec: &str)
++                        -> Result<(Object, Option<Reference>), Error> {
++        let spec = try!(CString::new(spec));
++        let mut git_obj = ptr::null_mut();
++        let mut git_ref = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_revparse_ext(&mut git_obj, &mut git_ref,
++                                            self.raw, spec));
++            assert!(!git_obj.is_null());
++            Ok((Binding::from_raw(git_obj), Binding::from_raw_opt(git_ref)))
++        }
++    }
++
++    /// Tests whether this repository is a bare repository or not.
++    pub fn is_bare(&self) -> bool {
++        unsafe { raw::git_repository_is_bare(self.raw) == 1 }
++    }
++
++    /// Tests whether this repository is a shallow clone.
++    pub fn is_shallow(&self) -> bool {
++        unsafe { raw::git_repository_is_shallow(self.raw) == 1 }
++    }
++
++    /// Tests whether this repository is a worktree.
++    pub fn is_worktree(&self) -> bool {
++        unsafe { raw::git_repository_is_worktree(self.raw) == 1 }
++    }
++
++    /// Tests whether this repository is empty.
++    pub fn is_empty(&self) -> Result<bool, Error> {
++        let empty = unsafe {
++            try_call!(raw::git_repository_is_empty(self.raw))
++        };
++        Ok(empty == 1)
++    }
++
++    /// Returns the path to the `.git` folder for normal repositories or the
++    /// repository itself for bare repositories.
++    pub fn path(&self) -> &Path {
++        unsafe {
++            let ptr = raw::git_repository_path(self.raw);
++            util::bytes2path(::opt_bytes(self, ptr).unwrap())
++        }
++    }
++
++    /// Returns the current state of this repository.
++    pub fn state(&self) -> RepositoryState {
++        let state = unsafe { raw::git_repository_state(self.raw) };
++        macro_rules! check( ($($raw:ident => $real:ident),*) => (
++            $(if state == raw::$raw as c_int {
++                super::RepositoryState::$real
++            }) else *
++            else {
++                panic!("unknown repository state: {}", state)
++            }
++        ) );
++
++        check!(
++            GIT_REPOSITORY_STATE_NONE => Clean,
++            GIT_REPOSITORY_STATE_MERGE => Merge,
++            GIT_REPOSITORY_STATE_REVERT => Revert,
++            GIT_REPOSITORY_STATE_REVERT_SEQUENCE => RevertSequence,
++            GIT_REPOSITORY_STATE_CHERRYPICK => CherryPick,
++            GIT_REPOSITORY_STATE_CHERRYPICK_SEQUENCE => CherryPickSequence,
++            GIT_REPOSITORY_STATE_BISECT => Bisect,
++            GIT_REPOSITORY_STATE_REBASE => Rebase,
++            GIT_REPOSITORY_STATE_REBASE_INTERACTIVE => RebaseInteractive,
++            GIT_REPOSITORY_STATE_REBASE_MERGE => RebaseMerge,
++            GIT_REPOSITORY_STATE_APPLY_MAILBOX => ApplyMailbox,
++            GIT_REPOSITORY_STATE_APPLY_MAILBOX_OR_REBASE => ApplyMailboxOrRebase
++        )
++    }
++
++    /// Get the path of the working directory for this repository.
++    ///
++    /// If this repository is bare, then `None` is returned.
++    pub fn workdir(&self) -> Option<&Path> {
++        unsafe {
++            let ptr = raw::git_repository_workdir(self.raw);
++            if ptr.is_null() {
++                None
++            } else {
++                Some(util::bytes2path(CStr::from_ptr(ptr).to_bytes()))
++            }
++        }
++    }
++
++    /// Set the path to the working directory for this repository.
++    ///
++    /// If `update_gitlink` is true, create/update the gitlink file in the
++    /// workdir and set config "core.worktree" (if workdir is not the parent
++    /// of the .git directory).
++    pub fn set_workdir(&self, path: &Path, update_gitlink: bool)
++                       -> Result<(), Error> {
++        let path = try!(path.into_c_string());
++        unsafe {
++            try_call!(raw::git_repository_set_workdir(self.raw(), path,
++                                                      update_gitlink));
++        }
++        Ok(())
++    }
++
++    /// Get the currently active namespace for this repository.
++    ///
++    /// If there is no namespace, or the namespace is not a valid utf8 string,
++    /// `None` is returned.
++    pub fn namespace(&self) -> Option<&str> {
++        self.namespace_bytes().and_then(|s| str::from_utf8(s).ok())
++    }
++
++    /// Get the currently active namespace for this repository as a byte array.
++    ///
++    /// If there is no namespace, `None` is returned.
++    pub fn namespace_bytes(&self) -> Option<&[u8]> {
++        unsafe { ::opt_bytes(self, raw::git_repository_get_namespace(self.raw)) }
++    }
++
++    /// Set the active namespace for this repository.
++    pub fn set_namespace(&self, namespace: &str) -> Result<(), Error> {
++        self.set_namespace_bytes(namespace.as_bytes())
++    }
++
++    /// Set the active namespace for this repository as a byte array.
++    pub fn set_namespace_bytes(&self, namespace: &[u8]) -> Result<(), Error> {
++        unsafe {
++            let namespace = try!(CString::new(namespace));
++            try_call!(raw::git_repository_set_namespace(self.raw,
++                                                        namespace));
++            Ok(())
++        }
++    }
++
++    /// Remove the active namespace for this repository.
++    pub fn remove_namespace(&self) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_repository_set_namespace(self.raw,
++                                                        ptr::null()));
++            Ok(())
++        }
++    }
++
++    /// Retrieves the Git merge message.
++    /// Remember to remove the message when finished.
++    pub fn message(&self) -> Result<String, Error> {
++        unsafe {
++            let buf = Buf::new();
++            try_call!(raw::git_repository_message(buf.raw(), self.raw));
++            Ok(str::from_utf8(&buf).unwrap().to_string())
++        }
++    }
++
++    /// Remove the Git merge message.
++    pub fn remove_message(&self) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_repository_message_remove(self.raw));
++            Ok(())
++        }
++    }
++
++    /// List all remotes for a given repository.
++    pub fn remotes(&self) -> Result<StringArray, Error> {
++        let mut arr = raw::git_strarray {
++            strings: 0 as *mut *mut c_char,
++            count: 0,
++        };
++        unsafe {
++            try_call!(raw::git_remote_list(&mut arr, self.raw));
++            Ok(Binding::from_raw(arr))
++        }
++    }
++
++    /// Get the information for a particular remote.
++    pub fn find_remote(&self, name: &str) -> Result<Remote, Error> {
++        let mut ret = ptr::null_mut();
++        let name = try!(CString::new(name));
++        unsafe {
++            try_call!(raw::git_remote_lookup(&mut ret, self.raw, name));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Add a remote with the default fetch refspec to the repository's
++    /// configuration.
++    pub fn remote(&self, name: &str, url: &str) -> Result<Remote, Error> {
++        let mut ret = ptr::null_mut();
++        let name = try!(CString::new(name));
++        let url = try!(CString::new(url));
++        unsafe {
++            try_call!(raw::git_remote_create(&mut ret, self.raw, name, url));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Create an anonymous remote.
++    ///
++    /// Create a remote with the given url and refspec in memory. You can use
++    /// this when you have a URL instead of a remote's name. Note that anonymous
++    /// remotes cannot be converted to persisted remotes.
++    pub fn remote_anonymous(&self, url: &str) -> Result<Remote, Error> {
++        let mut ret = ptr::null_mut();
++        let url = try!(CString::new(url));
++        unsafe {
++            try_call!(raw::git_remote_create_anonymous(&mut ret, self.raw, url));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Give a remote a new name.
++    ///
++    /// All remote-tracking branches and configuration settings for the remote
++    /// are updated.
++    ///
++    /// A temporary in-memory remote cannot be given a name with this method.
++    ///
++    /// No loaded instances of the remote with the old name will change their
++    /// name or their list of refspecs.
++    ///
++    /// The returned array of strings is a list of the non-default refspecs
++    /// which cannot be renamed and are returned for further processing by the
++    /// caller.
++    pub fn remote_rename(&self, name: &str,
++                         new_name: &str) -> Result<StringArray, Error> {
++        let name = try!(CString::new(name));
++        let new_name = try!(CString::new(new_name));
++        let mut problems = raw::git_strarray {
++            count: 0,
++            strings: 0 as *mut *mut c_char,
++        };
++        unsafe {
++            try_call!(raw::git_remote_rename(&mut problems, self.raw, name,
++                                             new_name));
++            Ok(Binding::from_raw(problems))
++        }
++    }
++
++    /// Delete an existing persisted remote.
++    ///
++    /// All remote-tracking branches and configuration settings for the remote
++    /// will be removed.
++    pub fn remote_delete(&self, name: &str) -> Result<(), Error> {
++        let name = try!(CString::new(name));
++        unsafe { try_call!(raw::git_remote_delete(self.raw, name)); }
++        Ok(())
++    }
++
++    /// Add a fetch refspec to the remote's configuration.
++    ///
++    /// Add the given refspec to the fetch list in the configuration. No loaded
++    /// remote instances will be affected.
++    pub fn remote_add_fetch(&self, name: &str, spec: &str)
++                            -> Result<(), Error> {
++        let name = try!(CString::new(name));
++        let spec = try!(CString::new(spec));
++        unsafe {
++            try_call!(raw::git_remote_add_fetch(self.raw, name, spec));
++        }
++        Ok(())
++    }
++
++    /// Add a push refspec to the remote's configuration.
++    ///
++    /// Add the given refspec to the push list in the configuration. No
++    /// loaded remote instances will be affected.
++    pub fn remote_add_push(&self, name: &str, spec: &str)
++                           -> Result<(), Error> {
++        let name = try!(CString::new(name));
++        let spec = try!(CString::new(spec));
++        unsafe {
++            try_call!(raw::git_remote_add_push(self.raw, name, spec));
++        }
++        Ok(())
++    }
++
++    /// Set the remote's url in the configuration.
++    ///
++    /// Remote objects already in memory will not be affected. This assumes
++    /// the common case of a single-url remote and will otherwise return an
++    /// error.
++    pub fn remote_set_url(&self, name: &str, url: &str) -> Result<(), Error> {
++        let name = try!(CString::new(name));
++        let url = try!(CString::new(url));
++        unsafe { try_call!(raw::git_remote_set_url(self.raw, name, url)); }
++        Ok(())
++    }
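A sketch tying the configuration setters together; since loaded `Remote` objects are unaffected, the remote is looked up again afterwards (names and URLs are hypothetical):

    extern crate git2;

    fn repoint_origin(repo: &git2::Repository) -> Result<(), git2::Error> {
        repo.remote_set_url("origin", "https://example.com/new.git")?;
        repo.remote_add_fetch("origin", "+refs/notes/*:refs/notes/*")?;
        let origin = repo.find_remote("origin")?;
        assert_eq!(origin.url(), Some("https://example.com/new.git"));
        Ok(())
    }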
++
++    /// Set the remote's url for pushing in the configuration.
++    ///
++    /// Remote objects already in memory will not be affected. This assumes
++    /// the common case of a single-url remote and will otherwise return an
++    /// error.
++    ///
++    /// `None` indicates that it should be cleared.
++    pub fn remote_set_pushurl(&self, name: &str, pushurl: Option<&str>)
++                              -> Result<(), Error> {
++        let name = try!(CString::new(name));
++        let pushurl = try!(::opt_cstr(pushurl));
++        unsafe {
++            try_call!(raw::git_remote_set_pushurl(self.raw, name, pushurl));
++        }
++        Ok(())
++    }
++
++    /// Sets the current head to the specified object and optionally resets
++    /// the index and working tree to match.
++    ///
++    /// A soft reset means the head will be moved to the commit.
++    ///
++    /// A mixed reset will trigger a soft reset, plus the index will be
++    /// replaced with the content of the commit tree.
++    ///
++    /// A hard reset will trigger a mixed reset and the working directory will
++    /// be replaced with the content of the index. (Untracked and ignored files
++    /// will be left alone, however.)
++    ///
++    /// The `target` is a commit-ish to which the HEAD should be moved. The
++    /// object can either be a commit or a tag, but tags must be dereferenceable
++    /// to a commit.
++    ///
++    /// The `checkout` options will only be used for a hard reset.
++    pub fn reset(&self,
++                 target: &Object,
++                 kind: ResetType,
++                 checkout: Option<&mut CheckoutBuilder>)
++                 -> Result<(), Error> {
++        unsafe {
++            let mut opts: raw::git_checkout_options = mem::zeroed();
++            try_call!(raw::git_checkout_init_options(&mut opts,
++                                raw::GIT_CHECKOUT_OPTIONS_VERSION));
++            let opts = checkout.map(|c| {
++                c.configure(&mut opts); &mut opts
++            });
++            try_call!(raw::git_reset(self.raw, target.raw(), kind, opts));
++        }
++        Ok(())
++    }
++
++    /// Updates some entries in the index from the target commit tree.
++    ///
++    /// The scope of the updated entries is determined by the paths being
++    /// in the iterator provided.
++    ///
++    /// Passing a `None` target will result in removing entries in the index
++    /// matching the provided pathspecs.
++    pub fn reset_default<T, I>(&self,
++                               target: Option<&Object>,
++                               paths: I) -> Result<(), Error>
++        where T: IntoCString, I: IntoIterator<Item = T>,
++    {
++        let (_a, _b, mut arr) = try!(::util::iter2cstrs(paths));
++        let target = target.map(|t| t.raw());
++        unsafe {
++            try_call!(raw::git_reset_default(self.raw, target, &mut arr));
++        }
++        Ok(())
++    }
++
++    /// Retrieve and resolve the reference pointed at by HEAD.
++    pub fn head(&self) -> Result<Reference, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_repository_head(&mut ret, self.raw));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Make the repository HEAD point to the specified reference.
++    ///
++    /// If the provided reference points to a tree or a blob, the HEAD is
++    /// unaltered and an error is returned.
++    ///
++    /// If the provided reference points to a branch, the HEAD will point to
++    /// that branch, staying attached, or become attached if it isn't yet. If
++    /// the branch doesn't exist yet, no error will be returned. The HEAD will
++    /// then be attached to an unborn branch.
++    ///
++    /// Otherwise, the HEAD will be detached and will directly point to the
++    /// commit.
++    pub fn set_head(&self, refname: &str) -> Result<(), Error> {
++        let refname = try!(CString::new(refname));
++        unsafe {
++            try_call!(raw::git_repository_set_head(self.raw, refname));
++        }
++        Ok(())
++    }
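A sketch of the hard-reset case described above, resetting to the commit HEAD already points at and thereby discarding staged and working-tree changes:

    extern crate git2;

    fn hard_reset_to_head(repo: &git2::Repository) -> Result<(), git2::Error> {
        let oid = repo.head()?.target().expect("HEAD points at a commit");
        let commit = repo.find_object(oid, Some(git2::ObjectType::Commit))?;
        // `None`: default checkout options suffice for this sketch.
        repo.reset(&commit, git2::ResetType::Hard, None)
    }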
++
++    /// Determines whether the repository HEAD is detached.
++    pub fn head_detached(&self) -> Result<bool, Error> {
++        unsafe {
++            let value = raw::git_repository_head_detached(self.raw);
++            match value {
++                0 => Ok(false),
++                1 => Ok(true),
++                _ => Err(Error::last_error(value).unwrap())
++            }
++        }
++    }
++
++    /// Make the repository HEAD directly point to the commit.
++    ///
++    /// If the provided commitish cannot be found in the repository, the HEAD
++    /// is unaltered and an error is returned.
++    ///
++    /// If the provided commitish cannot be peeled into a commit, the HEAD is
++    /// unaltered and an error is returned.
++    ///
++    /// Otherwise, the HEAD will eventually be detached and will directly point
++    /// to the peeled commit.
++    pub fn set_head_detached(&self, commitish: Oid) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_repository_set_head_detached(self.raw,
++                                                            commitish.raw()));
++        }
++        Ok(())
++    }
++
++    /// Create an iterator for the repo's references.
++    pub fn references(&self) -> Result<References, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_reference_iterator_new(&mut ret, self.raw));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Create an iterator for the repo's references that match the specified
++    /// glob.
++    pub fn references_glob(&self, glob: &str) -> Result<References, Error> {
++        let mut ret = ptr::null_mut();
++        let glob = try!(CString::new(glob));
++        unsafe {
++            try_call!(raw::git_reference_iterator_glob_new(&mut ret, self.raw,
++                                                           glob));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Load all submodules for this repository and return them.
++    pub fn submodules(&self) -> Result<Vec<Submodule>, Error> {
++        struct Data<'a, 'b: 'a> {
++            repo: &'b Repository,
++            ret: &'a mut Vec<Submodule<'b>>,
++        }
++        let mut ret = Vec::new();
++
++        unsafe {
++            let mut data = Data {
++                repo: self,
++                ret: &mut ret,
++            };
++            try_call!(raw::git_submodule_foreach(self.raw, append,
++                                                 &mut data as *mut _
++                                                           as *mut c_void));
++        }
++
++        return Ok(ret);
++
++        extern fn append(_repo: *mut raw::git_submodule,
++                         name: *const c_char,
++                         data: *mut c_void) -> c_int {
++            unsafe {
++                let data = &mut *(data as *mut Data);
++                let mut raw = ptr::null_mut();
++                let rc = raw::git_submodule_lookup(&mut raw, data.repo.raw(),
++                                                   name);
++                assert_eq!(rc, 0);
++                data.ret.push(Binding::from_raw(raw));
++            }
++            0
++        }
++    }
++
++    /// Gather file status information and populate the returned structure.
++    ///
++    /// Note that if a pathspec is given in the options to filter the
++    /// status, then the results from rename detection (if you enable it) may
++    /// not be accurate. To do rename detection properly, this must be called
++    /// with no pathspec so that all files can be considered.
++    pub fn statuses(&self, options: Option<&mut StatusOptions>)
++                    -> Result<Statuses, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_status_list_new(&mut ret, self.raw,
++                                               options.map(|s| s.raw())
++                                                      .unwrap_or(ptr::null())));
++            Ok(Binding::from_raw(ret))
++        }
++    }
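A sketch of a pathspec-free status listing, which per the note above keeps rename detection usable:

    extern crate git2;

    fn list_dirty(repo: &git2::Repository) -> Result<(), git2::Error> {
        let mut opts = git2::StatusOptions::new();
        opts.include_untracked(true);
        for entry in repo.statuses(Some(&mut opts))?.iter() {
            println!("{:?} {}", entry.status(), entry.path().unwrap_or("<non-utf8>"));
        }
        Ok(())
    }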
++
++    /// Test if the ignore rules apply to a given file.
++    ///
++    /// This function checks the ignore rules to see if they would apply to the
++    /// given file. This indicates if the file would be ignored regardless of
++    /// whether the file is already in the index or committed to the repository.
++    ///
++    /// One way to think of this is if you were to do "git add ." on the
++    /// directory containing the file, would it be added or not?
++    pub fn status_should_ignore(&self, path: &Path) -> Result<bool, Error> {
++        let mut ret = 0 as c_int;
++        let path = try!(path.into_c_string());
++        unsafe {
++            try_call!(raw::git_status_should_ignore(&mut ret, self.raw,
++                                                    path));
++        }
++        Ok(ret != 0)
++    }
++
++    /// Get file status for a single file.
++    ///
++    /// This tries to get status for the filename that you give. If no files
++    /// match that name (in either the HEAD, index, or working directory), this
++    /// returns NotFound.
++    ///
++    /// If the name matches multiple files (for example, if the path names a
++    /// directory or if running on a case-insensitive filesystem and yet the
++    /// HEAD has two entries that both match the path), then this returns
++    /// Ambiguous because it cannot give correct results.
++    ///
++    /// This does not do any sort of rename detection. Renames require a set of
++    /// targets and because of the path filtering, there is not enough
++    /// information to check renames correctly. To check file status with rename
++    /// detection, there is no choice but to do a full `statuses` and scan
++    /// through looking for the path that you are interested in.
++    pub fn status_file(&self, path: &Path) -> Result<Status, Error> {
++        let mut ret = 0 as c_uint;
++        let path = if cfg!(windows) {
++            // `git_status_file` does not work with windows path separators,
++            // so we convert \ to /
++            try!(::std::ffi::CString::new(path.to_string_lossy().replace('\\', "/")))
++        } else {
++            try!(path.into_c_string())
++        };
++        unsafe {
++            try_call!(raw::git_status_file(&mut ret, self.raw,
++                                           path));
++        }
++        Ok(Status::from_bits_truncate(ret as u32))
++    }
++
++    /// Create an iterator which loops over the requested branches.
++    pub fn branches(&self, filter: Option<BranchType>)
++                    -> Result<Branches, Error> {
++        let mut raw = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_branch_iterator_new(&mut raw, self.raw(), filter));
++            Ok(Branches::from_raw(raw))
++        }
++    }
++
++    /// Get the Index file for this repository.
++    ///
++    /// If a custom index has not been set, the default index for the repository
++    /// will be returned (the one located in .git/index).
++    pub fn index(&self) -> Result<Index, Error> {
++        let mut raw = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_repository_index(&mut raw, self.raw()));
++            Ok(Binding::from_raw(raw))
++        }
++    }
++
++    /// Set the Index file for this repository.
++    pub fn set_index(&self, index: &mut Index) {
++        unsafe {
++            raw::git_repository_set_index(self.raw(), index.raw());
++        }
++    }
++
++    /// Get the configuration file for this repository.
++    ///
++    /// If a configuration file has not been set, the default config set for the
++    /// repository will be returned, including global and system configurations
++    /// (if they are available).
++    pub fn config(&self) -> Result<Config, Error> {
++        let mut raw = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_repository_config(&mut raw, self.raw()));
++            Ok(Binding::from_raw(raw))
++        }
++    }
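A sketch of iterating the branch listing above; each item is a `(Branch, BranchType)` pair wrapped in a `Result`:

    extern crate git2;

    fn local_branches(repo: &git2::Repository) -> Result<(), git2::Error> {
        for item in repo.branches(Some(git2::BranchType::Local))? {
            let (branch, _ty) = item?;
            println!("{}", branch.name()?.unwrap_or("<non-utf8>"));
        }
        Ok(())
    }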
++ pub fn blob(&self, data: &[u8]) -> Result { ++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ let ptr = data.as_ptr() as *const c_void; ++ let len = data.len() as size_t; ++ try_call!(raw::git_blob_create_frombuffer(&mut raw, self.raw(), ++ ptr, len)); ++ Ok(Binding::from_raw(&raw as *const _)) ++ } ++ } ++ ++ /// Read a file from the filesystem and write its content to the Object ++ /// Database as a loose blob ++ /// ++ /// The Oid returned can in turn be passed to `find_blob` to get a handle to ++ /// the blob. ++ pub fn blob_path(&self, path: &Path) -> Result { ++ let path = try!(path.into_c_string()); ++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ try_call!(raw::git_blob_create_fromdisk(&mut raw, self.raw(), ++ path)); ++ Ok(Binding::from_raw(&raw as *const _)) ++ } ++ } ++ ++ /// Create a stream to write blob ++ /// ++ /// This function may need to buffer the data on disk and will in general ++ /// not be the right choice if you know the size of the data to write. ++ /// ++ /// Use `BlobWriter::commit()` to commit the write to the object db ++ /// and get the object id. ++ /// ++ /// If the `hintpath` parameter is filled, it will be used to determine ++ /// what git filters should be applied to the object before it is written ++ /// to the object database. ++ pub fn blob_writer(&self, hintpath: Option<&Path>) -> Result { ++ let path_str = match hintpath { ++ Some(path) => Some(try!(path.into_c_string())), ++ None => None, ++ }; ++ let path = match path_str { ++ Some(ref path) => path.as_ptr(), ++ None => ptr::null(), ++ }; ++ let mut out = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_blob_create_fromstream(&mut out, self.raw(), path)); ++ Ok(BlobWriter::from_raw(out)) ++ } ++ } ++ ++ /// Lookup a reference to one of the objects in a repository. ++ pub fn find_blob(&self, oid: Oid) -> Result { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_blob_lookup(&mut raw, self.raw(), oid.raw())); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Get the object database for this repository ++ pub fn odb(&self) -> Result { ++ let mut odb = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_repository_odb(&mut odb, self.raw())); ++ Ok(Odb::from_raw(odb)) ++ } ++ } ++ ++ /// Create a new branch pointing at a target commit ++ /// ++ /// A new direct reference will be created pointing to this target commit. ++ /// If `force` is true and a reference already exists with the given name, ++ /// it'll be replaced. ++ pub fn branch(&self, ++ branch_name: &str, ++ target: &Commit, ++ force: bool) -> Result { ++ let branch_name = try!(CString::new(branch_name)); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_branch_create(&mut raw, ++ self.raw(), ++ branch_name, ++ target.raw(), ++ force)); ++ Ok(Branch::wrap(Binding::from_raw(raw))) ++ } ++ } ++ ++ /// Lookup a branch by its name in a repository. ++ pub fn find_branch(&self, name: &str, branch_type: BranchType) ++ -> Result { ++ let name = try!(CString::new(name)); ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_branch_lookup(&mut ret, self.raw(), name, ++ branch_type)); ++ Ok(Branch::wrap(Binding::from_raw(ret))) ++ } ++ } ++ ++ /// Create new commit in the repository ++ /// ++ /// If the `update_ref` is not `None`, name of the reference that will be ++ /// updated to point to this commit. If the reference is not direct, it will ++ /// be resolved to a direct reference. 
Use "HEAD" to update the HEAD of the ++ /// current branch and make it point to this commit. If the reference ++ /// doesn't exist yet, it will be created. If it does exist, the first ++ /// parent must be the tip of this branch. ++ pub fn commit(&self, ++ update_ref: Option<&str>, ++ author: &Signature, ++ committer: &Signature, ++ message: &str, ++ tree: &Tree, ++ parents: &[&Commit]) -> Result { ++ let update_ref = try!(::opt_cstr(update_ref)); ++ let mut parent_ptrs = parents.iter().map(|p| { ++ p.raw() as *const raw::git_commit ++ }).collect::>(); ++ let message = try!(CString::new(message)); ++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ try_call!(raw::git_commit_create(&mut raw, ++ self.raw(), ++ update_ref, ++ author.raw(), ++ committer.raw(), ++ ptr::null(), ++ message, ++ tree.raw(), ++ parents.len() as size_t, ++ parent_ptrs.as_mut_ptr())); ++ Ok(Binding::from_raw(&raw as *const _)) ++ } ++ } ++ ++ /// Create a commit object from the given buffer and signature ++ /// ++ /// Given the unsigned commit object's contents, its signature and the ++ /// header field in which to store the signature, attach the signature to ++ /// the commit and write it into the given repository. ++ /// ++ /// Use `None` in `signature_field` to use the default of `gpgsig`, which is ++ /// almost certainly what you want. ++ /// ++ /// Returns the resulting (signed) commit id. ++ pub fn commit_signed(&self, ++ commit_content: &str, ++ signature: &str, ++ signature_field: Option<&str>) -> Result { ++ let commit_content = try!(CString::new(commit_content)); ++ let signature = try!(CString::new(signature)); ++ let signature_field = try!(::opt_cstr(signature_field)); ++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ try_call!(raw::git_commit_create_with_signature(&mut raw, ++ self.raw(), ++ commit_content, ++ signature, ++ signature_field)); ++ Ok(Binding::from_raw(&raw as *const _)) ++ } ++ } ++ ++ ++ /// Extract the signature from a commit ++ /// ++ /// Returns a tuple containing the signature in the first value and the ++ /// signed data in the second. ++ pub fn extract_signature(&self, ++ commit_id: &Oid, ++ signature_field: Option<&str>) ++ -> Result<(Buf, Buf), Error> { ++ let signature_field = try!(::opt_cstr(signature_field)); ++ let signature = Buf::new(); ++ let content = Buf::new(); ++ unsafe { ++ try_call!(raw::git_commit_extract_signature(signature.raw(), ++ content.raw(), ++ self.raw(), ++ commit_id.raw() as *mut _, ++ signature_field)); ++ Ok((signature, content)) ++ } ++ } ++ ++ ++ /// Lookup a reference to one of the commits in a repository. ++ pub fn find_commit(&self, oid: Oid) -> Result { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_commit_lookup(&mut raw, self.raw(), oid.raw())); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Creates a `AnnotatedCommit` from the given commit id. ++ pub fn find_annotated_commit(&self, id: Oid) -> Result { ++ unsafe { ++ let mut raw = 0 as *mut raw::git_annotated_commit; ++ try_call!(raw::git_annotated_commit_lookup(&mut raw, self.raw(), id.raw())); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Lookup a reference to one of the objects in a repository. ++ pub fn find_object(&self, oid: Oid, ++ kind: Option) -> Result { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_object_lookup(&mut raw, self.raw(), oid.raw(), ++ kind)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Create a new direct reference. 
++ /// ++ /// This function will return an error if a reference already exists with ++ /// the given name unless force is true, in which case it will be ++ /// overwritten. ++ pub fn reference(&self, name: &str, id: Oid, force: bool, ++ log_message: &str) -> Result { ++ let name = try!(CString::new(name)); ++ let log_message = try!(CString::new(log_message)); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_reference_create(&mut raw, self.raw(), name, ++ id.raw(), force, ++ log_message)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Conditionally create new direct reference. ++ /// ++ /// A direct reference (also called an object id reference) refers directly ++ /// to a specific object id (a.k.a. OID or SHA) in the repository. The id ++ /// permanently refers to the object (although the reference itself can be ++ /// moved). For example, in libgit2 the direct ref "refs/tags/v0.17.0" ++ /// refers to OID 5b9fac39d8a76b9139667c26a63e6b3f204b3977. ++ /// ++ /// The direct reference will be created in the repository and written to ++ /// the disk. ++ /// ++ /// Valid reference names must follow one of two patterns: ++ /// ++ /// 1. Top-level names must contain only capital letters and underscores, ++ /// and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). ++ /// 2. Names prefixed with "refs/" can be almost anything. You must avoid ++ /// the characters `~`, `^`, `:`, `\\`, `?`, `[`, and `*`, and the ++ /// sequences ".." and "@{" which have special meaning to revparse. ++ /// ++ /// This function will return an error if a reference already exists with ++ /// the given name unless `force` is true, in which case it will be ++ /// overwritten. ++ /// ++ /// The message for the reflog will be ignored if the reference does not ++ /// belong in the standard set (HEAD, branches and remote-tracking ++ /// branches) and it does not have a reflog. ++ /// ++ /// It will return GIT_EMODIFIED if the reference's value at the time of ++ /// updating does not match the one passed through `current_id` (i.e. if the ++ /// ref has changed since the user read it). ++ pub fn reference_matching(&self, ++ name: &str, ++ id: Oid, ++ force: bool, ++ current_id: Oid, ++ log_message: &str) -> Result { ++ let name = try!(CString::new(name)); ++ let log_message = try!(CString::new(log_message)); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_reference_create_matching(&mut raw, ++ self.raw(), ++ name, ++ id.raw(), ++ force, ++ current_id.raw(), ++ log_message)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Create a new symbolic reference. ++ /// ++ /// This function will return an error if a reference already exists with ++ /// the given name unless force is true, in which case it will be ++ /// overwritten. ++ pub fn reference_symbolic(&self, name: &str, target: &str, ++ force: bool, ++ log_message: &str) ++ -> Result { ++ let name = try!(CString::new(name)); ++ let target = try!(CString::new(target)); ++ let log_message = try!(CString::new(log_message)); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_reference_symbolic_create(&mut raw, self.raw(), ++ name, target, force, ++ log_message)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Create a new symbolic reference. ++ /// ++ /// This function will return an error if a reference already exists with ++ /// the given name unless force is true, in which case it will be ++ /// overwritten. 
++ /// ++ /// It will return GIT_EMODIFIED if the reference's value at the time of ++ /// updating does not match the one passed through current_value (i.e. if ++ /// the ref has changed since the user read it). ++ pub fn reference_symbolic_matching(&self, ++ name: &str, ++ target: &str, ++ force: bool, ++ current_value: &str, ++ log_message: &str) ++ -> Result { ++ let name = try!(CString::new(name)); ++ let target = try!(CString::new(target)); ++ let current_value = try!(CString::new(current_value)); ++ let log_message = try!(CString::new(log_message)); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_reference_symbolic_create_matching(&mut raw, ++ self.raw(), ++ name, ++ target, ++ force, ++ current_value, ++ log_message)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Lookup a reference to one of the objects in a repository. ++ pub fn find_reference(&self, name: &str) -> Result { ++ let name = try!(CString::new(name)); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_reference_lookup(&mut raw, self.raw(), name)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Lookup a reference by name and resolve immediately to OID. ++ /// ++ /// This function provides a quick way to resolve a reference name straight ++ /// through to the object id that it refers to. This avoids having to ++ /// allocate or free any `Reference` objects for simple situations. ++ pub fn refname_to_id(&self, name: &str) -> Result { ++ let name = try!(CString::new(name)); ++ let mut ret = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ try_call!(raw::git_reference_name_to_id(&mut ret, self.raw(), name)); ++ Ok(Binding::from_raw(&ret as *const _)) ++ } ++ } ++ ++ /// Creates a git_annotated_commit from the given reference. ++ pub fn reference_to_annotated_commit(&self, reference: &Reference) ++ -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_annotated_commit_from_ref(&mut ret, ++ self.raw(), ++ reference.raw())); ++ Ok(AnnotatedCommit::from_raw(ret)) ++ } ++ } ++ ++ /// Create a new action signature with default user and now timestamp. ++ /// ++ /// This looks up the user.name and user.email from the configuration and ++ /// uses the current time as the timestamp, and creates a new signature ++ /// based on that information. It will return `NotFound` if either the ++ /// user.name or user.email are not set. ++ pub fn signature(&self) -> Result, Error> { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_signature_default(&mut ret, self.raw())); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Set up a new git submodule for checkout. ++ /// ++ /// This does "git submodule add" up to the fetch and checkout of the ++ /// submodule contents. It preps a new submodule, creates an entry in ++ /// `.gitmodules` and creates an empty initialized repository either at the ++ /// given path in the working directory or in `.git/modules` with a gitlink ++ /// from the working directory to the new repo. ++ /// ++ /// To fully emulate "git submodule add" call this function, then `open()` ++ /// the submodule repo and perform the clone step as needed. Lastly, call ++ /// `add_finalize()` to wrap up adding the new submodule and `.gitmodules` ++ /// to the index to be ready to commit. 
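++    ///
++    /// A minimal sketch of that flow, assuming a hypothetical submodule URL
++    /// and path:
++    ///
++    /// ```no_run
++    /// use std::path::Path;
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// let mut sm = repo.submodule("https://example.com/dep.git",
++    ///                             Path::new("vendor/dep"), true).unwrap();
++    /// // ... open/clone the submodule contents here as needed ...
++    /// sm.add_finalize().unwrap();
++    /// ```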
++ pub fn submodule(&self, url: &str, path: &Path, ++ use_gitlink: bool) -> Result { ++ let url = try!(CString::new(url)); ++ let path = try!(path.into_c_string()); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_submodule_add_setup(&mut raw, self.raw(), ++ url, path, use_gitlink)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Lookup submodule information by name or path. ++ /// ++ /// Given either the submodule name or path (they are usually the same), ++ /// this returns a structure describing the submodule. ++ pub fn find_submodule(&self, name: &str) -> Result { ++ let name = try!(CString::new(name)); ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_submodule_lookup(&mut raw, self.raw(), name)); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Get the status for a submodule. ++ /// ++ /// This looks at a submodule and tries to determine the status. It ++ /// will return a combination of the `SubmoduleStatus` values. ++ pub fn submodule_status(&self, name: &str, ignore: SubmoduleIgnore) ++ -> Result { ++ let mut ret = 0; ++ let name = try!(CString::new(name)); ++ unsafe { ++ try_call!(raw::git_submodule_status(&mut ret, self.raw, name, ++ ignore)); ++ } ++ Ok(SubmoduleStatus::from_bits_truncate(ret as u32)) ++ } ++ ++ /// Lookup a reference to one of the objects in a repository. ++ pub fn find_tree(&self, oid: Oid) -> Result { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_tree_lookup(&mut raw, self.raw(), oid.raw())); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Create a new TreeBuilder, optionally initialized with the ++ /// entries of the given Tree. ++ /// ++ /// The tree builder can be used to create or modify trees in memory and ++ /// write them as tree objects to the database. ++ pub fn treebuilder(&self, tree: Option<&Tree>) -> Result { ++ unsafe { ++ let mut ret = ptr::null_mut(); ++ let tree = match tree { ++ Some(tree) => tree.raw(), ++ None => ptr::null_mut(), ++ }; ++ try_call!(raw::git_treebuilder_new(&mut ret, self.raw, tree)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ ++ /// Create a new tag in the repository from an object ++ /// ++ /// A new reference will also be created pointing to this tag object. If ++ /// `force` is true and a reference already exists with the given name, ++ /// it'll be replaced. ++ /// ++ /// The message will not be cleaned up. ++ /// ++ /// The tag name will be checked for validity. You must avoid the characters ++ /// '~', '^', ':', ' \ ', '?', '[', and '*', and the sequences ".." and " @ ++ /// {" which have special meaning to revparse. ++ pub fn tag(&self, name: &str, target: &Object, ++ tagger: &Signature, message: &str, ++ force: bool) -> Result { ++ let name = try!(CString::new(name)); ++ let message = try!(CString::new(message)); ++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ try_call!(raw::git_tag_create(&mut raw, self.raw, name, ++ target.raw(), tagger.raw(), ++ message, force)); ++ Ok(Binding::from_raw(&raw as *const _)) ++ } ++ } ++ ++ /// Create a new lightweight tag pointing at a target object ++ /// ++ /// A new direct reference will be created pointing to this target object. ++ /// If force is true and a reference already exists with the given name, ++ /// it'll be replaced. 
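++    ///
++    /// A minimal sketch (the tag name is hypothetical; any object in the
++    /// repository can serve as the target):
++    ///
++    /// ```no_run
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// let head = repo.revparse_single("HEAD").unwrap();
++    /// // Create or replace the lightweight tag `v0.1.0` at HEAD.
++    /// repo.tag_lightweight("v0.1.0", &head, true).unwrap();
++    /// ```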
++ pub fn tag_lightweight(&self, ++ name: &str, ++ target: &Object, ++ force: bool) -> Result { ++ let name = try!(CString::new(name)); ++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ try_call!(raw::git_tag_create_lightweight(&mut raw, self.raw, name, ++ target.raw(), force)); ++ Ok(Binding::from_raw(&raw as *const _)) ++ } ++ } ++ ++ /// Lookup a tag object from the repository. ++ pub fn find_tag(&self, id: Oid) -> Result { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_tag_lookup(&mut raw, self.raw, id.raw())); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Delete an existing tag reference. ++ /// ++ /// The tag name will be checked for validity, see `tag` for some rules ++ /// about valid names. ++ pub fn tag_delete(&self, name: &str) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ unsafe { ++ try_call!(raw::git_tag_delete(self.raw, name)); ++ Ok(()) ++ } ++ } ++ ++ /// Get a list with all the tags in the repository. ++ /// ++ /// An optional fnmatch pattern can also be specified. ++ pub fn tag_names(&self, pattern: Option<&str>) -> Result { ++ let mut arr = raw::git_strarray { ++ strings: 0 as *mut *mut c_char, ++ count: 0, ++ }; ++ unsafe { ++ match pattern { ++ Some(s) => { ++ let s = try!(CString::new(s)); ++ try_call!(raw::git_tag_list_match(&mut arr, s, self.raw)); ++ } ++ None => { try_call!(raw::git_tag_list(&mut arr, self.raw)); } ++ } ++ Ok(Binding::from_raw(arr)) ++ } ++ } ++ ++ /// Updates files in the index and the working tree to match the content of ++ /// the commit pointed at by HEAD. ++ pub fn checkout_head(&self, opts: Option<&mut CheckoutBuilder>) ++ -> Result<(), Error> { ++ unsafe { ++ let mut raw_opts = mem::zeroed(); ++ try_call!(raw::git_checkout_init_options(&mut raw_opts, ++ raw::GIT_CHECKOUT_OPTIONS_VERSION)); ++ if let Some(c) = opts { ++ c.configure(&mut raw_opts); ++ } ++ ++ try_call!(raw::git_checkout_head(self.raw, &raw_opts)); ++ } ++ Ok(()) ++ } ++ ++ /// Updates files in the working tree to match the content of the index. ++ /// ++ /// If the index is `None`, the repository's index will be used. ++ pub fn checkout_index(&self, ++ index: Option<&mut Index>, ++ opts: Option<&mut CheckoutBuilder>) -> Result<(), Error> { ++ unsafe { ++ let mut raw_opts = mem::zeroed(); ++ try_call!(raw::git_checkout_init_options(&mut raw_opts, ++ raw::GIT_CHECKOUT_OPTIONS_VERSION)); ++ if let Some(c) = opts { ++ c.configure(&mut raw_opts); ++ } ++ ++ try_call!(raw::git_checkout_index(self.raw, ++ index.map(|i| &mut *i.raw()), ++ &raw_opts)); ++ } ++ Ok(()) ++ } ++ ++ /// Updates files in the index and working tree to match the content of the ++ /// tree pointed at by the treeish. ++ pub fn checkout_tree(&self, ++ treeish: &Object, ++ opts: Option<&mut CheckoutBuilder>) -> Result<(), Error> { ++ unsafe { ++ let mut raw_opts = mem::zeroed(); ++ try_call!(raw::git_checkout_init_options(&mut raw_opts, ++ raw::GIT_CHECKOUT_OPTIONS_VERSION)); ++ if let Some(c) = opts { ++ c.configure(&mut raw_opts); ++ } ++ ++ try_call!(raw::git_checkout_tree(self.raw, &*treeish.raw(), ++ &raw_opts)); ++ } ++ Ok(()) ++ } ++ ++ /// Merges the given commit(s) into HEAD, writing the results into the ++ /// working directory. Any changes are staged for commit and any conflicts ++ /// are written to the index. Callers should inspect the repository's index ++ /// after this completes, resolve any conflicts and prepare a commit. ++ /// ++ /// For compatibility with git, the repository is put into a merging state. 
++    /// Once the commit is done (or if the user wishes to abort), you should
++    /// clear this state by calling git_repository_state_cleanup().
++    pub fn merge(&self,
++                 annotated_commits: &[&AnnotatedCommit],
++                 merge_opts: Option<&mut MergeOptions>,
++                 checkout_opts: Option<&mut CheckoutBuilder>)
++                 -> Result<(), Error>
++    {
++        unsafe {
++            let mut raw_checkout_opts = mem::zeroed();
++            try_call!(raw::git_checkout_init_options(&mut raw_checkout_opts,
++                                raw::GIT_CHECKOUT_OPTIONS_VERSION));
++            if let Some(c) = checkout_opts {
++                c.configure(&mut raw_checkout_opts);
++            }
++
++            let mut commit_ptrs = annotated_commits.iter().map(|c| {
++                c.raw() as *const raw::git_annotated_commit
++            }).collect::<Vec<_>>();
++
++            try_call!(raw::git_merge(self.raw,
++                                     commit_ptrs.as_mut_ptr(),
++                                     annotated_commits.len() as size_t,
++                                     merge_opts.map(|o| o.raw())
++                                               .unwrap_or(ptr::null()),
++                                     &raw_checkout_opts));
++        }
++        Ok(())
++    }
++
++    /// Merge two commits, producing an index that reflects the result of
++    /// the merge. The index may be written as-is to the working directory or
++    /// checked out. If the index is to be converted to a tree, the caller
++    /// should resolve any conflicts that arose as part of the merge.
++    pub fn merge_commits(&self, our_commit: &Commit, their_commit: &Commit,
++                         opts: Option<&MergeOptions>) -> Result<Index, Error> {
++        let mut raw = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_merge_commits(&mut raw, self.raw,
++                                             our_commit.raw(),
++                                             their_commit.raw(),
++                                             opts.map(|o| o.raw())));
++            Ok(Binding::from_raw(raw))
++        }
++    }
++
++    /// Merge two trees, producing an index that reflects the result of
++    /// the merge. The index may be written as-is to the working directory or
++    /// checked out. If the index is to be converted to a tree, the caller
++    /// should resolve any conflicts that arose as part of the merge.
++    pub fn merge_trees(&self, ancestor_tree: &Tree, our_tree: &Tree,
++                       their_tree: &Tree, opts: Option<&MergeOptions>)
++                       -> Result<Index, Error> {
++        let mut raw = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_merge_trees(&mut raw, self.raw, ancestor_tree.raw(),
++                                           our_tree.raw(), their_tree.raw(),
++                                           opts.map(|o| o.raw())));
++            Ok(Binding::from_raw(raw))
++        }
++    }
++
++    /// Remove all the metadata associated with an ongoing command like merge,
++    /// revert, cherry-pick, etc. For example: MERGE_HEAD, MERGE_MSG, etc.
++    pub fn cleanup_state(&self) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_repository_state_cleanup(self.raw));
++        }
++        Ok(())
++    }
++
++    /// Analyzes the given branch(es) and determines the opportunities for
++    /// merging them into the HEAD of the repository.
++    pub fn merge_analysis(&self,
++                          their_heads: &[&AnnotatedCommit])
++                          -> Result<(MergeAnalysis, MergePreference), Error> {
++        unsafe {
++            let mut raw_merge_analysis = 0 as raw::git_merge_analysis_t;
++            let mut raw_merge_preference = 0 as raw::git_merge_preference_t;
++            let mut their_heads = their_heads
++                .iter()
++                .map(|v| v.raw() as *const _)
++                .collect::<Vec<_>>();
++            try_call!(raw::git_merge_analysis(&mut raw_merge_analysis,
++                                              &mut raw_merge_preference,
++                                              self.raw,
++                                              their_heads.as_mut_ptr() as *mut _,
++                                              their_heads.len()));
++            Ok((MergeAnalysis::from_bits_truncate(raw_merge_analysis as u32),
++                MergePreference::from_bits_truncate(raw_merge_preference as u32)))
++        }
++    }
++
++    /// Add a note for an object
++    ///
++    /// The `notes_ref` argument is the canonical name of the reference to use,
++    /// defaulting to "refs/notes/commits". If `force` is specified then
++    /// previous notes are overwritten.
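++    ///
++    /// A minimal sketch of attaching a note to HEAD under the default notes
++    /// reference (the signature values are hypothetical):
++    ///
++    /// ```no_run
++    /// use git2::{Repository, Signature};
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// let sig = Signature::now("Jane Doe", "jane@example.com").unwrap();
++    /// let head = repo.refname_to_id("HEAD").unwrap();
++    /// repo.note(&sig, &sig, None, head, "reviewed", false).unwrap();
++    /// ```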
++ pub fn note(&self, ++ author: &Signature, ++ committer: &Signature, ++ notes_ref: Option<&str>, ++ oid: Oid, ++ note: &str, ++ force: bool) -> Result { ++ let notes_ref = try!(::opt_cstr(notes_ref)); ++ let note = try!(CString::new(note)); ++ let mut ret = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ try_call!(raw::git_note_create(&mut ret, ++ self.raw, ++ notes_ref, ++ author.raw(), ++ committer.raw(), ++ oid.raw(), ++ note, ++ force)); ++ Ok(Binding::from_raw(&ret as *const _)) ++ } ++ } ++ ++ /// Get the default notes reference for this repository ++ pub fn note_default_ref(&self) -> Result { ++ let ret = Buf::new(); ++ unsafe { ++ try_call!(raw::git_note_default_ref(ret.raw(), self.raw)); ++ } ++ Ok(str::from_utf8(&ret).unwrap().to_string()) ++ } ++ ++ /// Creates a new iterator for notes in this repository. ++ /// ++ /// The `notes_ref` argument is the canonical name of the reference to use, ++ /// defaulting to "refs/notes/commits". ++ /// ++ /// The iterator returned yields pairs of (Oid, Oid) where the first element ++ /// is the id of the note and the second id is the id the note is ++ /// annotating. ++ pub fn notes(&self, notes_ref: Option<&str>) -> Result { ++ let notes_ref = try!(::opt_cstr(notes_ref)); ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_note_iterator_new(&mut ret, self.raw, notes_ref)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Read the note for an object. ++ /// ++ /// The `notes_ref` argument is the canonical name of the reference to use, ++ /// defaulting to "refs/notes/commits". ++ /// ++ /// The id specified is the Oid of the git object to read the note from. ++ pub fn find_note(&self, notes_ref: Option<&str>, id: Oid) ++ -> Result { ++ let notes_ref = try!(::opt_cstr(notes_ref)); ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_note_read(&mut ret, self.raw, notes_ref, ++ id.raw())); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Remove the note for an object. ++ /// ++ /// The `notes_ref` argument is the canonical name of the reference to use, ++ /// defaulting to "refs/notes/commits". ++ /// ++ /// The id specified is the Oid of the git object to remove the note from. ++ pub fn note_delete(&self, ++ id: Oid, ++ notes_ref: Option<&str>, ++ author: &Signature, ++ committer: &Signature) -> Result<(), Error> { ++ let notes_ref = try!(::opt_cstr(notes_ref)); ++ unsafe { ++ try_call!(raw::git_note_remove(self.raw, notes_ref, author.raw(), ++ committer.raw(), id.raw())); ++ Ok(()) ++ } ++ } ++ ++ /// Create a revwalk that can be used to traverse the commit graph. ++ pub fn revwalk(&self) -> Result { ++ let mut raw = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_revwalk_new(&mut raw, self.raw())); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Get the blame for a single file. 
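++    ///
++    /// A minimal sketch (the file path is hypothetical and is interpreted
++    /// relative to the repository root):
++    ///
++    /// ```no_run
++    /// use std::path::Path;
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// let blame = repo.blame_file(Path::new("src/lib.rs"), None).unwrap();
++    /// for hunk in blame.iter() {
++    ///     println!("{} lines from {}", hunk.lines_in_hunk(), hunk.final_commit_id());
++    /// }
++    /// ```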
++ pub fn blame_file(&self, path: &Path, opts: Option<&mut BlameOptions>) ++ -> Result { ++ let path = try!(path.into_c_string()); ++ let mut raw = ptr::null_mut(); ++ ++ unsafe { ++ try_call!(raw::git_blame_file(&mut raw, ++ self.raw(), ++ path, ++ opts.map(|s| s.raw()))); ++ Ok(Binding::from_raw(raw)) ++ } ++ } ++ ++ /// Find a merge base between two commits ++ pub fn merge_base(&self, one: Oid, two: Oid) -> Result { ++ let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ unsafe { ++ try_call!(raw::git_merge_base(&mut raw, self.raw, ++ one.raw(), two.raw())); ++ Ok(Binding::from_raw(&raw as *const _)) ++ } ++ } ++ ++ /// Find all merge bases between two commits ++ pub fn merge_bases(&self, one: Oid, two: Oid) -> Result { ++ let mut arr = raw::git_oidarray { ++ ids: ptr::null_mut(), ++ count: 0, ++ }; ++ unsafe { ++ try_call!(raw::git_merge_bases(&mut arr, self.raw, ++ one.raw(), two.raw())); ++ Ok(Binding::from_raw(arr)) ++ } ++ } ++ ++ /// Count the number of unique commits between two commit objects ++ /// ++ /// There is no need for branches containing the commits to have any ++ /// upstream relationship, but it helps to think of one as a branch and the ++ /// other as its upstream, the ahead and behind values will be what git ++ /// would report for the branches. ++ pub fn graph_ahead_behind(&self, local: Oid, upstream: Oid) ++ -> Result<(usize, usize), Error> { ++ unsafe { ++ let mut ahead: size_t = 0; ++ let mut behind: size_t = 0; ++ try_call!(raw::git_graph_ahead_behind(&mut ahead, &mut behind, ++ self.raw(), local.raw(), ++ upstream.raw())); ++ Ok((ahead as usize, behind as usize)) ++ } ++ } ++ ++ /// Determine if a commit is the descendant of another commit ++ pub fn graph_descendant_of(&self, commit: Oid, ancestor: Oid) ++ -> Result { ++ unsafe { ++ let rv = try_call!(raw::git_graph_descendant_of(self.raw(), ++ commit.raw(), ++ ancestor.raw())); ++ Ok(rv != 0) ++ } ++ } ++ ++ /// Read the reflog for the given reference ++ /// ++ /// If there is no reflog file for the given reference yet, an empty reflog ++ /// object will be returned. ++ pub fn reflog(&self, name: &str) -> Result { ++ let name = try!(CString::new(name)); ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_reflog_read(&mut ret, self.raw, name)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Delete the reflog for the given reference ++ pub fn reflog_delete(&self, name: &str) -> Result<(), Error> { ++ let name = try!(CString::new(name)); ++ unsafe { try_call!(raw::git_reflog_delete(self.raw, name)); } ++ Ok(()) ++ } ++ ++ /// Rename a reflog ++ /// ++ /// The reflog to be renamed is expected to already exist. ++ pub fn reflog_rename(&self, old_name: &str, new_name: &str) ++ -> Result<(), Error> { ++ let old_name = try!(CString::new(old_name)); ++ let new_name = try!(CString::new(new_name)); ++ unsafe { ++ try_call!(raw::git_reflog_rename(self.raw, old_name, new_name)); ++ } ++ Ok(()) ++ } ++ ++ /// Check if the given reference has a reflog. ++ pub fn reference_has_log(&self, name: &str) -> Result { ++ let name = try!(CString::new(name)); ++ let ret = unsafe { ++ try_call!(raw::git_reference_has_log(self.raw, name)) ++ }; ++ Ok(ret != 0) ++ } ++ ++ /// Ensure that the given reference has a reflog. 
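++    ///
++    /// A minimal sketch pairing this with `reference_has_log` (the reference
++    /// name is hypothetical):
++    ///
++    /// ```no_run
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// if !repo.reference_has_log("refs/heads/topic").unwrap() {
++    ///     repo.reference_ensure_log("refs/heads/topic").unwrap();
++    /// }
++    /// ```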
++    pub fn reference_ensure_log(&self, name: &str) -> Result<(), Error> {
++        let name = try!(CString::new(name));
++        unsafe {
++            try_call!(raw::git_reference_ensure_log(self.raw, name));
++        }
++        Ok(())
++    }
++
++    /// Describes a commit
++    ///
++    /// Performs a describe operation on the current commit and the worktree.
++    /// After performing a describe on HEAD, a status is run and the description
++    /// is considered to be dirty if there are any changes.
++    pub fn describe(&self, opts: &DescribeOptions) -> Result<Describe, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_describe_workdir(&mut ret, self.raw, opts.raw()));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Create a diff with the difference between two tree objects.
++    ///
++    /// This is equivalent to `git diff <old-tree> <new-tree>`
++    ///
++    /// The first tree will be used for the "old_file" side of the delta and the
++    /// second tree will be used for the "new_file" side of the delta. You can
++    /// pass `None` to indicate an empty tree, although it is an error to pass
++    /// `None` for both the `old_tree` and `new_tree`.
++    pub fn diff_tree_to_tree(&self,
++                             old_tree: Option<&Tree>,
++                             new_tree: Option<&Tree>,
++                             opts: Option<&mut DiffOptions>)
++                             -> Result<Diff, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_diff_tree_to_tree(&mut ret,
++                                                 self.raw(),
++                                                 old_tree.map(|s| s.raw()),
++                                                 new_tree.map(|s| s.raw()),
++                                                 opts.map(|s| s.raw())));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Create a diff between a tree and repository index.
++    ///
++    /// This is equivalent to `git diff --cached <treeish>` or if you pass
++    /// the HEAD tree, then like `git diff --cached`.
++    ///
++    /// The tree you pass will be used for the "old_file" side of the delta, and
++    /// the index will be used for the "new_file" side of the delta.
++    ///
++    /// If you pass `None` for the index, then the existing index of the `repo`
++    /// will be used. In this case, the index will be refreshed from disk
++    /// (if it has changed) before the diff is generated.
++    ///
++    /// If the tree is `None`, then it is considered an empty tree.
++    pub fn diff_tree_to_index(&self,
++                              old_tree: Option<&Tree>,
++                              index: Option<&Index>,
++                              opts: Option<&mut DiffOptions>)
++                              -> Result<Diff, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_diff_tree_to_index(&mut ret,
++                                                  self.raw(),
++                                                  old_tree.map(|s| s.raw()),
++                                                  index.map(|s| s.raw()),
++                                                  opts.map(|s| s.raw())));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Create a diff between two index objects.
++    ///
++    /// The first index will be used for the "old_file" side of the delta, and
++    /// the second index will be used for the "new_file" side of the delta.
++    pub fn diff_index_to_index(&self,
++                               old_index: &Index,
++                               new_index: &Index,
++                               opts: Option<&mut DiffOptions>)
++                               -> Result<Diff, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_diff_index_to_index(&mut ret,
++                                                   self.raw(),
++                                                   old_index.raw(),
++                                                   new_index.raw(),
++                                                   opts.map(|s| s.raw())));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Create a diff between the repository index and the workdir directory.
++    ///
++    /// This matches the `git diff` command. See the note below on
++    /// `tree_to_workdir` for a discussion of the difference between
++    /// `git diff` and `git diff HEAD` and how to emulate a `git diff <treeish>`
++    /// using libgit2.
++    ///
++    /// The index will be used for the "old_file" side of the delta, and the
++    /// working directory will be used for the "new_file" side of the delta.
++ /// ++ /// If you pass `None` for the index, then the existing index of the `repo` ++ /// will be used. In this case, the index will be refreshed from disk ++ /// (if it has changed) before the diff is generated. ++ pub fn diff_index_to_workdir(&self, ++ index: Option<&Index>, ++ opts: Option<&mut DiffOptions>) ++ -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_diff_index_to_workdir(&mut ret, ++ self.raw(), ++ index.map(|s| s.raw()), ++ opts.map(|s| s.raw()))); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Create a diff between a tree and the working directory. ++ /// ++ /// The tree you provide will be used for the "old_file" side of the delta, ++ /// and the working directory will be used for the "new_file" side. ++ /// ++ /// This is not the same as `git diff ` or `git diff-index ++ /// `. Those commands use information from the index, whereas this ++ /// function strictly returns the differences between the tree and the files ++ /// in the working directory, regardless of the state of the index. Use ++ /// `tree_to_workdir_with_index` to emulate those commands. ++ /// ++ /// To see difference between this and `tree_to_workdir_with_index`, ++ /// consider the example of a staged file deletion where the file has then ++ /// been put back into the working dir and further modified. The ++ /// tree-to-workdir diff for that file is 'modified', but `git diff` would ++ /// show status 'deleted' since there is a staged delete. ++ /// ++ /// If `None` is passed for `tree`, then an empty tree is used. ++ pub fn diff_tree_to_workdir(&self, ++ old_tree: Option<&Tree>, ++ opts: Option<&mut DiffOptions>) ++ -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_diff_tree_to_workdir(&mut ret, ++ self.raw(), ++ old_tree.map(|s| s.raw()), ++ opts.map(|s| s.raw()))); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Create a diff between a tree and the working directory using index data ++ /// to account for staged deletes, tracked files, etc. ++ /// ++ /// This emulates `git diff ` by diffing the tree to the index and ++ /// the index to the working directory and blending the results into a ++ /// single diff that includes staged deleted, etc. ++ pub fn diff_tree_to_workdir_with_index(&self, ++ old_tree: Option<&Tree>, ++ opts: Option<&mut DiffOptions>) ++ -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_diff_tree_to_workdir_with_index(&mut ret, ++ self.raw(), old_tree.map(|s| s.raw()), opts.map(|s| s.raw()))); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Create a PackBuilder ++ pub fn packbuilder(&self) -> Result { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_packbuilder_new(&mut ret, self.raw())); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Save the local modifications to a new stash. ++ pub fn stash_save(&mut self, ++ stasher: &Signature, ++ message: &str, ++ flags: Option) ++ -> Result { ++ unsafe { ++ let mut raw_oid = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; ++ let message = try!(CString::new(message)); ++ let flags = flags.unwrap_or_else(StashFlags::empty); ++ try_call!(raw::git_stash_save(&mut raw_oid, ++ self.raw(), ++ stasher.raw(), ++ message, ++ flags.bits() as c_uint)); ++ Ok(Binding::from_raw(&raw_oid as *const _)) ++ } ++ } ++ ++ /// Apply a single stashed state from the stash list. 
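++    ///
++    /// A minimal sketch of saving and then re-applying the newest stash
++    /// entry (the signature values are hypothetical):
++    ///
++    /// ```no_run
++    /// use git2::{Repository, Signature};
++    ///
++    /// let mut repo = Repository::open("/path/to/repo").unwrap();
++    /// let sig = Signature::now("Jane Doe", "jane@example.com").unwrap();
++    /// repo.stash_save(&sig, "wip", None).unwrap();
++    /// // Entry 0 is the most recently stashed state.
++    /// repo.stash_apply(0, None).unwrap();
++    /// ```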
++    pub fn stash_apply(&mut self,
++                       index: usize,
++                       opts: Option<&mut StashApplyOptions>)
++                       -> Result<(), Error> {
++        unsafe {
++            let opts = opts.map(|opts| opts.raw());
++            try_call!(raw::git_stash_apply(self.raw(), index, opts));
++            Ok(())
++        }
++    }
++
++    /// Loop over all the stashed states and issue a callback for each one.
++    ///
++    /// Return `true` to continue iterating or `false` to stop.
++    pub fn stash_foreach<C>(&mut self, mut callback: C) -> Result<(), Error>
++        where C: FnMut(usize, &str, &Oid) -> bool
++    {
++        unsafe {
++            let mut data = StashCbData { callback: &mut callback };
++            try_call!(raw::git_stash_foreach(self.raw(),
++                                             stash_cb,
++                                             &mut data as *mut _ as *mut _));
++            Ok(())
++        }
++    }
++
++    /// Remove a single stashed state from the stash list.
++    pub fn stash_drop(&mut self, index: usize) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_stash_drop(self.raw(), index));
++            Ok(())
++        }
++    }
++
++    /// Apply a single stashed state from the stash list and remove it from the list if successful.
++    pub fn stash_pop(&mut self,
++                     index: usize,
++                     opts: Option<&mut StashApplyOptions>)
++                     -> Result<(), Error> {
++        unsafe {
++            let opts = opts.map(|opts| opts.raw());
++            try_call!(raw::git_stash_pop(self.raw(), index, opts));
++            Ok(())
++        }
++    }
++
++    /// Add ignore rules for a repository.
++    ///
++    /// The format of the rules is the same one of the .gitignore file.
++    pub fn add_ignore_rule(&self, rules: &str) -> Result<(), Error> {
++        let rules = CString::new(rules)?;
++        unsafe {
++            try_call!(raw::git_ignore_add_rule(self.raw, rules));
++        }
++        Ok(())
++    }
++
++    /// Clear ignore rules that were explicitly added.
++    pub fn clear_ignore_rules(&self) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_ignore_clear_internal_rules(self.raw));
++        }
++        Ok(())
++    }
++
++    /// Test if the ignore rules apply to a given path.
++    pub fn is_path_ignored<P: AsRef<Path>>(&self, path: P) -> Result<bool, Error> {
++        let path = if cfg!(windows) {
++            // `git_ignore_path_is_ignored` does not work with the Windows path
++            // separator, so we convert \ to /
++            try!(::std::ffi::CString::new(path.as_ref().to_string_lossy().replace('\\', "/")))
++        } else {
++            try!(path.as_ref().into_c_string())
++        };
++        let mut ignored: c_int = 0;
++        unsafe {
++            try_call!(raw::git_ignore_path_is_ignored(&mut ignored, self.raw, path));
++        }
++        Ok(ignored == 1)
++    }
++}
++
++impl Binding for Repository {
++    type Raw = *mut raw::git_repository;
++    unsafe fn from_raw(ptr: *mut raw::git_repository) -> Repository {
++        Repository { raw: ptr }
++    }
++    fn raw(&self) -> *mut raw::git_repository { self.raw }
++}
++
++impl Drop for Repository {
++    fn drop(&mut self) {
++        unsafe { raw::git_repository_free(self.raw) }
++    }
++}
++
++impl RepositoryInitOptions {
++    /// Creates a default set of initialization options.
++    ///
++    /// By default this will set flags for creating all necessary directories
++    /// and initializing a directory from the user-configured templates path.
++    pub fn new() -> RepositoryInitOptions {
++        RepositoryInitOptions {
++            flags: raw::GIT_REPOSITORY_INIT_MKDIR as u32 |
++                   raw::GIT_REPOSITORY_INIT_MKPATH as u32 |
++                   raw::GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE as u32,
++            mode: 0,
++            workdir_path: None,
++            description: None,
++            template_path: None,
++            initial_head: None,
++            origin_url: None,
++        }
++    }
++
++    /// Create a bare repository with no working directory.
++    ///
++    /// Defaults to false.
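++    ///
++    /// A minimal sketch of the builder in use (the target path and branch
++    /// name are hypothetical):
++    ///
++    /// ```no_run
++    /// use git2::{Repository, RepositoryInitOptions};
++    ///
++    /// let mut opts = RepositoryInitOptions::new();
++    /// opts.bare(true).initial_head("refs/heads/main");
++    /// let repo = Repository::init_opts("/path/to/repo.git", &opts).unwrap();
++    /// ```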
++    pub fn bare(&mut self, bare: bool) -> &mut RepositoryInitOptions {
++        self.flag(raw::GIT_REPOSITORY_INIT_BARE, bare)
++    }
++
++    /// Return an error if the repository path appears to already be a git
++    /// repository.
++    ///
++    /// Defaults to false.
++    pub fn no_reinit(&mut self, enabled: bool) -> &mut RepositoryInitOptions {
++        self.flag(raw::GIT_REPOSITORY_INIT_NO_REINIT, enabled)
++    }
++
++    /// Normally a '/.git/' will be appended to the repo path for non-bare repos
++    /// (if it is not already there), but passing this flag prevents that
++    /// behavior.
++    ///
++    /// Defaults to false.
++    pub fn no_dotgit_dir(&mut self, enabled: bool) -> &mut RepositoryInitOptions {
++        self.flag(raw::GIT_REPOSITORY_INIT_NO_DOTGIT_DIR, enabled)
++    }
++
++    /// Make the repo path (and workdir path) as needed. The ".git" directory
++    /// will always be created regardless of this flag.
++    ///
++    /// Defaults to true.
++    pub fn mkdir(&mut self, enabled: bool) -> &mut RepositoryInitOptions {
++        self.flag(raw::GIT_REPOSITORY_INIT_MKDIR, enabled)
++    }
++
++    /// Recursively make all components of the repo and workdir paths as
++    /// necessary.
++    ///
++    /// Defaults to true.
++    pub fn mkpath(&mut self, enabled: bool) -> &mut RepositoryInitOptions {
++        self.flag(raw::GIT_REPOSITORY_INIT_MKPATH, enabled)
++    }
++
++    /// Set to one of the `RepositoryInit` constants, or a custom value.
++    pub fn mode(&mut self, mode: RepositoryInitMode)
++                -> &mut RepositoryInitOptions {
++        self.mode = mode.bits();
++        self
++    }
++
++    /// Enable or disable using external templates.
++    ///
++    /// If enabled, then the `template_path` option will be queried first, then
++    /// `init.templatedir` from the global config, and finally
++    /// `/usr/share/git-core-templates` will be used (if it exists).
++    ///
++    /// Defaults to true.
++    pub fn external_template(&mut self, enabled: bool)
++                             -> &mut RepositoryInitOptions {
++        self.flag(raw::GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE, enabled)
++    }
++
++    fn flag(&mut self, flag: raw::git_repository_init_flag_t, on: bool)
++            -> &mut RepositoryInitOptions {
++        if on {
++            self.flags |= flag as u32;
++        } else {
++            self.flags &= !(flag as u32);
++        }
++        self
++    }
++
++    /// The path to the working directory.
++    ///
++    /// If this is a relative path it will be evaluated relative to the repo
++    /// path. If this is not the "natural" working directory, a .git gitlink
++    /// file will be created here linking to the repo path.
++    pub fn workdir_path(&mut self, path: &Path) -> &mut RepositoryInitOptions {
++        self.workdir_path = Some(path.into_c_string().unwrap());
++        self
++    }
++
++    /// If set, this will be used to initialize the "description" file in the
++    /// repository instead of using the template content.
++    pub fn description(&mut self, desc: &str) -> &mut RepositoryInitOptions {
++        self.description = Some(CString::new(desc).unwrap());
++        self
++    }
++
++    /// When the `external_template` option is set, this is the first location
++    /// to check for the template directory.
++    ///
++    /// If this is not configured, then the default locations will be searched
++    /// instead.
++    pub fn template_path(&mut self, path: &Path) -> &mut RepositoryInitOptions {
++        self.template_path = Some(path.into_c_string().unwrap());
++        self
++    }
++
++    /// The name of the head to point HEAD at.
++    ///
++    /// If not configured, this will be treated as `master` and the HEAD ref
++    /// will be set to `refs/heads/master`.
If this begins with `refs/` it will ++ /// be used verbatim; otherwise `refs/heads/` will be prefixed ++ pub fn initial_head(&mut self, head: &str) -> &mut RepositoryInitOptions { ++ self.initial_head = Some(CString::new(head).unwrap()); ++ self ++ } ++ ++ /// If set, then after the rest of the repository initialization is ++ /// completed an `origin` remote will be added pointing to this URL. ++ pub fn origin_url(&mut self, url: &str) -> &mut RepositoryInitOptions { ++ self.origin_url = Some(CString::new(url).unwrap()); ++ self ++ } ++ ++ /// Creates a set of raw init options to be used with ++ /// `git_repository_init_ext`. ++ /// ++ /// This method is unsafe as the returned value may have pointers to the ++ /// interior of this structure. ++ pub unsafe fn raw(&self) -> raw::git_repository_init_options { ++ let mut opts = mem::zeroed(); ++ assert_eq!(raw::git_repository_init_init_options(&mut opts, ++ raw::GIT_REPOSITORY_INIT_OPTIONS_VERSION), 0); ++ opts.flags = self.flags; ++ opts.mode = self.mode; ++ opts.workdir_path = ::call::convert(&self.workdir_path); ++ opts.description = ::call::convert(&self.description); ++ opts.template_path = ::call::convert(&self.template_path); ++ opts.initial_head = ::call::convert(&self.initial_head); ++ opts.origin_url = ::call::convert(&self.origin_url); ++ opts ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::ffi::OsStr; ++ use std::fs; ++ use std::path::Path; ++ use tempdir::TempDir; ++ use {Repository, Oid, ObjectType, ResetType}; ++ use build::CheckoutBuilder; ++ ++ #[test] ++ fn smoke_init() { ++ let td = TempDir::new("test").unwrap(); ++ let path = td.path(); ++ ++ let repo = Repository::init(path).unwrap(); ++ assert!(!repo.is_bare()); ++ } ++ ++ #[test] ++ fn smoke_init_bare() { ++ let td = TempDir::new("test").unwrap(); ++ let path = td.path(); ++ ++ let repo = Repository::init_bare(path).unwrap(); ++ assert!(repo.is_bare()); ++ assert!(repo.namespace().is_none()); ++ } ++ ++ #[test] ++ fn smoke_open() { ++ let td = TempDir::new("test").unwrap(); ++ let path = td.path(); ++ Repository::init(td.path()).unwrap(); ++ let repo = Repository::open(path).unwrap(); ++ assert!(!repo.is_bare()); ++ assert!(!repo.is_shallow()); ++ assert!(repo.is_empty().unwrap()); ++ assert_eq!(::test::realpath(&repo.path()).unwrap(), ++ ::test::realpath(&td.path().join(".git/")).unwrap()); ++ assert_eq!(repo.state(), ::RepositoryState::Clean); ++ } ++ ++ #[test] ++ fn smoke_open_bare() { ++ let td = TempDir::new("test").unwrap(); ++ let path = td.path(); ++ Repository::init_bare(td.path()).unwrap(); ++ ++ let repo = Repository::open(path).unwrap(); ++ assert!(repo.is_bare()); ++ assert_eq!(::test::realpath(&repo.path()).unwrap(), ++ ::test::realpath(&td.path().join("")).unwrap()); ++ } ++ ++ #[test] ++ fn smoke_checkout() { ++ let (_td, repo) = ::test::repo_init(); ++ repo.checkout_head(None).unwrap(); ++ } ++ ++ #[test] ++ fn smoke_revparse() { ++ let (_td, repo) = ::test::repo_init(); ++ let rev = repo.revparse("HEAD").unwrap(); ++ assert!(rev.to().is_none()); ++ let from = rev.from().unwrap(); ++ assert!(rev.from().is_some()); ++ ++ assert_eq!(repo.revparse_single("HEAD").unwrap().id(), from.id()); ++ let obj = repo.find_object(from.id(), None).unwrap().clone(); ++ obj.peel(ObjectType::Any).unwrap(); ++ obj.short_id().unwrap(); ++ repo.reset(&obj, ResetType::Hard, None).unwrap(); ++ let mut opts = CheckoutBuilder::new(); ++ t!(repo.reset(&obj, ResetType::Soft, Some(&mut opts))); ++ } ++ ++ #[test] ++ fn makes_dirs() { ++ let td = TempDir::new("foo").unwrap(); 
++ Repository::init(&td.path().join("a/b/c/d")).unwrap(); ++ } ++ ++ #[test] ++ fn smoke_discover() { ++ let td = TempDir::new("test").unwrap(); ++ let subdir = td.path().join("subdi"); ++ fs::create_dir(&subdir).unwrap(); ++ Repository::init_bare(td.path()).unwrap(); ++ let repo = Repository::discover(&subdir).unwrap(); ++ assert_eq!(::test::realpath(&repo.path()).unwrap(), ++ ::test::realpath(&td.path().join("")).unwrap()); ++ } ++ ++ #[test] ++ fn smoke_open_ext() { ++ let td = TempDir::new("test").unwrap(); ++ let subdir = td.path().join("subdir"); ++ fs::create_dir(&subdir).unwrap(); ++ Repository::init(td.path()).unwrap(); ++ ++ let repo = Repository::open_ext(&subdir, ::RepositoryOpenFlags::empty(), &[] as &[&OsStr]).unwrap(); ++ assert!(!repo.is_bare()); ++ assert_eq!(::test::realpath(&repo.path()).unwrap(), ++ ::test::realpath(&td.path().join(".git")).unwrap()); ++ ++ let repo = Repository::open_ext(&subdir, ::RepositoryOpenFlags::BARE, &[] as &[&OsStr]).unwrap(); ++ assert!(repo.is_bare()); ++ assert_eq!(::test::realpath(&repo.path()).unwrap(), ++ ::test::realpath(&td.path().join(".git")).unwrap()); ++ ++ let err = Repository::open_ext(&subdir, ::RepositoryOpenFlags::NO_SEARCH, &[] as &[&OsStr]).err().unwrap(); ++ assert_eq!(err.code(), ::ErrorCode::NotFound); ++ ++ assert!(Repository::open_ext(&subdir, ++ ::RepositoryOpenFlags::empty(), ++ &[&subdir]).is_ok()); ++ } ++ ++ fn graph_repo_init() -> (TempDir, Repository) { ++ let (_td, repo) = ::test::repo_init(); ++ { ++ let head = repo.head().unwrap().target().unwrap(); ++ let head = repo.find_commit(head).unwrap(); ++ ++ let mut index = repo.index().unwrap(); ++ let id = index.write_tree().unwrap(); ++ ++ let tree = repo.find_tree(id).unwrap(); ++ let sig = repo.signature().unwrap(); ++ repo.commit(Some("HEAD"), &sig, &sig, "second", ++ &tree, &[&head]).unwrap(); ++ } ++ (_td, repo) ++ } ++ ++ #[test] ++ fn smoke_graph_ahead_behind() { ++ let (_td, repo) = graph_repo_init(); ++ let head = repo.head().unwrap().target().unwrap(); ++ let head = repo.find_commit(head).unwrap(); ++ let head_id = head.id(); ++ let head_parent_id = head.parent(0).unwrap().id(); ++ let (ahead, behind) = repo.graph_ahead_behind(head_id, ++ head_parent_id).unwrap(); ++ assert_eq!(ahead, 1); ++ assert_eq!(behind, 0); ++ let (ahead, behind) = repo.graph_ahead_behind(head_parent_id, ++ head_id).unwrap(); ++ assert_eq!(ahead, 0); ++ assert_eq!(behind, 1); ++ } ++ ++ #[test] ++ fn smoke_graph_descendant_of() { ++ let (_td, repo) = graph_repo_init(); ++ let head = repo.head().unwrap().target().unwrap(); ++ let head = repo.find_commit(head).unwrap(); ++ let head_id = head.id(); ++ let head_parent_id = head.parent(0).unwrap().id(); ++ assert!(repo.graph_descendant_of(head_id, head_parent_id).unwrap()); ++ assert!(!repo.graph_descendant_of(head_parent_id, head_id).unwrap()); ++ } ++ ++ #[test] ++ fn smoke_reference_has_log_ensure_log() { ++ let (_td, repo) = ::test::repo_init(); ++ ++ assert_eq!(repo.reference_has_log("HEAD").unwrap(), true); ++ assert_eq!(repo.reference_has_log("refs/heads/master").unwrap(), true); ++ assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), false); ++ let master_oid = repo.revparse_single("master").unwrap().id(); ++ assert!(repo.reference("NOT_HEAD", master_oid, false, "creating a new branch").is_ok()); ++ assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), false); ++ assert!(repo.reference_ensure_log("NOT_HEAD").is_ok()); ++ assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), true); ++ } ++ ++ #[test] ++ fn 
smoke_set_head() { ++ let (_td, repo) = ::test::repo_init(); ++ ++ assert!(repo.set_head("refs/heads/does-not-exist").is_ok()); ++ assert!(repo.head().is_err()); ++ ++ assert!(repo.set_head("refs/heads/master").is_ok()); ++ assert!(repo.head().is_ok()); ++ ++ assert!(repo.set_head("*").is_err()); ++ } ++ ++ #[test] ++ fn smoke_set_head_detached() { ++ let (_td, repo) = ::test::repo_init(); ++ ++ let void_oid = Oid::from_bytes(b"00000000000000000000").unwrap(); ++ assert!(repo.set_head_detached(void_oid).is_err()); ++ ++ let master_oid = repo.revparse_single("master").unwrap().id(); ++ assert!(repo.set_head_detached(master_oid).is_ok()); ++ assert_eq!(repo.head().unwrap().target().unwrap(), master_oid); ++ } ++ ++ /// create an octopus: ++ /// /---o2-o4 ++ /// o1 X ++ /// \---o3-o5 ++ /// and checks that the merge bases of (o4,o5) are (o2,o3) ++ #[test] ++ fn smoke_merge_bases() { ++ let (_td, repo) = graph_repo_init(); ++ let sig = repo.signature().unwrap(); ++ ++ // let oid1 = head ++ let oid1 = repo.head().unwrap().target().unwrap(); ++ let commit1 = repo.find_commit(oid1).unwrap(); ++ println!("created oid1 {:?}", oid1); ++ ++ repo.branch("branch_a", &commit1, true).unwrap(); ++ repo.branch("branch_b", &commit1, true).unwrap(); ++ ++ // create commit oid2 on branchA ++ let mut index = repo.index().unwrap(); ++ let p = Path::new(repo.workdir().unwrap()).join("file_a"); ++ println!("using path {:?}", p); ++ fs::File::create(&p).unwrap(); ++ index.add_path(Path::new("file_a")).unwrap(); ++ let id_a = index.write_tree().unwrap(); ++ let tree_a = repo.find_tree(id_a).unwrap(); ++ let oid2 = repo.commit(Some("refs/heads/branch_a"), &sig, &sig, ++ "commit 2", &tree_a, &[&commit1]).unwrap(); ++ let commit2 = repo.find_commit(oid2).unwrap(); ++ println!("created oid2 {:?}", oid2); ++ ++ t!(repo.reset(commit1.as_object(), ResetType::Hard, None)); ++ ++ // create commit oid3 on branchB ++ let mut index = repo.index().unwrap(); ++ let p = Path::new(repo.workdir().unwrap()).join("file_b"); ++ fs::File::create(&p).unwrap(); ++ index.add_path(Path::new("file_b")).unwrap(); ++ let id_b = index.write_tree().unwrap(); ++ let tree_b = repo.find_tree(id_b).unwrap(); ++ let oid3 = repo.commit(Some("refs/heads/branch_b"), &sig, &sig, ++ "commit 3", &tree_b, &[&commit1]).unwrap(); ++ let commit3 = repo.find_commit(oid3).unwrap(); ++ println!("created oid3 {:?}", oid3); ++ ++ // create merge commit oid4 on branchA with parents oid2 and oid3 ++ //let mut index4 = repo.merge_commits(&commit2, &commit3, None).unwrap(); ++ repo.set_head("refs/heads/branch_a").unwrap(); ++ repo.checkout_head(None).unwrap(); ++ let oid4 = repo.commit(Some("refs/heads/branch_a"), &sig, &sig, ++ "commit 4", &tree_a, ++ &[&commit2, &commit3]).unwrap(); ++ //index4.write_tree_to(&repo).unwrap(); ++ println!("created oid4 {:?}", oid4); ++ ++ // create merge commit oid5 on branchB with parents oid2 and oid3 ++ //let mut index5 = repo.merge_commits(&commit3, &commit2, None).unwrap(); ++ repo.set_head("refs/heads/branch_b").unwrap(); ++ repo.checkout_head(None).unwrap(); ++ let oid5 = repo.commit(Some("refs/heads/branch_b"), &sig, &sig, ++ "commit 5", &tree_a, ++ &[&commit3, &commit2]).unwrap(); ++ //index5.write_tree_to(&repo).unwrap(); ++ println!("created oid5 {:?}", oid5); ++ ++ // merge bases of (oid4,oid5) should be (oid2,oid3) ++ let merge_bases = repo.merge_bases(oid4, oid5).unwrap(); ++ let mut found_oid2 = false; ++ let mut found_oid3 = false; ++ for mg in merge_bases.iter() { ++ println!("found merge base {:?}", mg); ++ if mg == 
&oid2 { ++ found_oid2 = true; ++ } else if mg == &oid3 { ++ found_oid3 = true; ++ } else { ++ assert!(false); ++ } ++ } ++ assert!(found_oid2); ++ assert!(found_oid3); ++ assert_eq!(merge_bases.len(), 2); ++ } ++ ++ #[test] ++ fn smoke_revparse_ext() { ++ let (_td, repo) = graph_repo_init(); ++ ++ { ++ let short_refname = "master"; ++ let expected_refname = "refs/heads/master"; ++ let (obj, reference) = repo.revparse_ext(short_refname).unwrap(); ++ let expected_obj = repo.revparse_single(expected_refname).unwrap(); ++ assert_eq!(obj.id(), expected_obj.id()); ++ assert_eq!(reference.unwrap().name().unwrap(), expected_refname); ++ } ++ { ++ let missing_refname = "refs/heads/does-not-exist"; ++ assert!(repo.revparse_ext(missing_refname).is_err()); ++ } ++ { ++ let (_obj, reference) = repo.revparse_ext("HEAD^").unwrap(); ++ assert!(reference.is_none()); ++ } ++ } ++ ++ #[test] ++ fn smoke_is_path_ignored() { ++ let (_td, repo) = graph_repo_init(); ++ ++ assert!(!repo.is_path_ignored(Path::new("/foo")).unwrap()); ++ ++ let _ = repo.add_ignore_rule("/foo"); ++ assert!(repo.is_path_ignored(Path::new("/foo")).unwrap()); ++ if cfg!(windows){ ++ assert!(repo.is_path_ignored(Path::new("\\foo\\thing")).unwrap()); ++ } ++ ++ ++ let _ = repo.clear_ignore_rules(); ++ assert!(!repo.is_path_ignored(Path::new("/foo")).unwrap()); ++ if cfg!(windows){ ++ assert!(!repo.is_path_ignored(Path::new("\\foo\\thing")).unwrap()); ++ } ++ } ++} diff --cc vendor/git2-0.7.5/src/revspec.rs index 000000000,000000000..eb18492ef new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/revspec.rs @@@ -1,0 -1,0 +1,26 @@@ ++use {Object, RevparseMode}; ++ ++/// A revspec represents a range of revisions within a repository. ++pub struct Revspec<'repo> { ++ from: Option>, ++ to: Option>, ++ mode: RevparseMode, ++} ++ ++impl<'repo> Revspec<'repo> { ++ /// Assembles a new revspec from the from/to components. ++ pub fn from_objects(from: Option>, ++ to: Option>, ++ mode: RevparseMode) -> Revspec<'repo> { ++ Revspec { from: from, to: to, mode: mode } ++ } ++ ++ /// Access the `from` range of this revspec. ++ pub fn from(&self) -> Option<&Object<'repo>> { self.from.as_ref() } ++ ++ /// Access the `to` range of this revspec. ++ pub fn to(&self) -> Option<&Object<'repo>> { self.to.as_ref() } ++ ++ /// Returns the intent of the revspec. ++ pub fn mode(&self) -> RevparseMode { self.mode } ++} diff --cc vendor/git2-0.7.5/src/revwalk.rs index 000000000,000000000..1e661db52 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/revwalk.rs @@@ -1,0 -1,0 +1,203 @@@ ++use std::marker; ++use std::ffi::CString; ++use libc::c_uint; ++ ++use {raw, Error, Sort, Oid, Repository}; ++use util::Binding; ++ ++/// A revwalk allows traversal of the commit graph defined by including one or ++/// more leaves and excluding one or more roots. ++pub struct Revwalk<'repo> { ++ raw: *mut raw::git_revwalk, ++ _marker: marker::PhantomData<&'repo Repository>, ++} ++ ++impl<'repo> Revwalk<'repo> { ++ /// Reset a revwalk to allow re-configuring it. ++ /// ++ /// The revwalk is automatically reset when iteration of its commits ++ /// completes. ++ pub fn reset(&mut self) { ++ unsafe { raw::git_revwalk_reset(self.raw()) } ++ } ++ ++ /// Set the order in which commits are visited. ++ pub fn set_sorting(&mut self, sort_mode: Sort) { ++ unsafe { ++ raw::git_revwalk_sorting(self.raw(), sort_mode.bits() as c_uint) ++ } ++ } ++ ++ /// Simplify the history by first-parent ++ /// ++ /// No parents other than the first for each commit will be enqueued. 
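++    ///
++    /// A minimal sketch of a first-parent walk from HEAD (the repository
++    /// path is hypothetical):
++    ///
++    /// ```no_run
++    /// let repo = git2::Repository::open("/path/to/repo").unwrap();
++    /// let mut walk = repo.revwalk().unwrap();
++    /// walk.push_head().unwrap();
++    /// walk.simplify_first_parent();
++    /// for id in walk {
++    ///     println!("{}", id.unwrap());
++    /// }
++    /// ```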
++    pub fn simplify_first_parent(&mut self) {
++        unsafe { raw::git_revwalk_simplify_first_parent(self.raw) }
++    }
++
++    /// Mark a commit to start traversal from.
++    ///
++    /// The given OID must belong to a committish on the walked repository.
++    ///
++    /// The given commit will be used as one of the roots when starting the
++    /// revision walk. At least one commit must be pushed onto the walker
++    /// before a walk can be started.
++    pub fn push(&mut self, oid: Oid) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_revwalk_push(self.raw(), oid.raw()));
++        }
++        Ok(())
++    }
++
++    /// Push the repository's HEAD.
++    ///
++    /// For more information, see `push`.
++    pub fn push_head(&mut self) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_revwalk_push_head(self.raw()));
++        }
++        Ok(())
++    }
++
++    /// Push matching references.
++    ///
++    /// The OIDs pointed to by the references that match the given glob
++    /// pattern will be pushed to the revision walker.
++    ///
++    /// A leading 'refs/' is implied if not present, as well as a trailing
++    /// `/*` if the glob lacks '?', '*' or '['.
++    ///
++    /// Any references matching this glob which do not point to a committish
++    /// will be ignored.
++    pub fn push_glob(&mut self, glob: &str) -> Result<(), Error> {
++        let glob = try!(CString::new(glob));
++        unsafe {
++            try_call!(raw::git_revwalk_push_glob(self.raw, glob));
++        }
++        Ok(())
++    }
++
++    /// Push and hide the respective endpoints of the given range.
++    ///
++    /// The range should be of the form `<commit>..<commit>` where each
++    /// `<commit>` is in the form accepted by `revparse_single`. The left-hand
++    /// commit will be hidden and the right-hand commit pushed.
++    pub fn push_range(&mut self, range: &str) -> Result<(), Error> {
++        let range = try!(CString::new(range));
++        unsafe {
++            try_call!(raw::git_revwalk_push_range(self.raw, range));
++        }
++        Ok(())
++    }
++
++    /// Push the OID pointed to by a reference.
++    ///
++    /// The reference must point to a committish.
++    pub fn push_ref(&mut self, reference: &str) -> Result<(), Error> {
++        let reference = try!(CString::new(reference));
++        unsafe {
++            try_call!(raw::git_revwalk_push_ref(self.raw, reference));
++        }
++        Ok(())
++    }
++
++    /// Mark a commit as not of interest to this revwalk.
++    pub fn hide(&mut self, oid: Oid) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_revwalk_hide(self.raw(), oid.raw()));
++        }
++        Ok(())
++    }
++
++    /// Hide the repository's HEAD.
++    ///
++    /// For more information, see `hide`.
++    pub fn hide_head(&mut self) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_revwalk_hide_head(self.raw()));
++        }
++        Ok(())
++    }
++
++    /// Hide matching references.
++    ///
++    /// The OIDs pointed to by the references that match the given glob
++    /// pattern and their ancestors will be hidden from the output on the
++    /// revision walk.
++    ///
++    /// A leading 'refs/' is implied if not present, as well as a trailing
++    /// `/*` if the glob lacks '?', '*' or '['.
++    ///
++    /// Any references matching this glob which do not point to a committish
++    /// will be ignored.
++    pub fn hide_glob(&mut self, glob: &str) -> Result<(), Error> {
++        let glob = try!(CString::new(glob));
++        unsafe {
++            try_call!(raw::git_revwalk_hide_glob(self.raw, glob));
++        }
++        Ok(())
++    }
++
++    /// Hide the OID pointed to by a reference.
++    ///
++    /// The reference must point to a committish.
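++    ///
++    /// # Example
++    ///
++    /// A minimal sketch, assuming `/path/to/repo` exists and has an
++    /// `origin/master` tracking ref; this walks commits not yet upstream:
++    ///
++    /// ```no_run
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// let mut walk = repo.revwalk().unwrap();
++    /// walk.push_head().unwrap();
++    /// // skip everything already reachable from the upstream branch
++    /// walk.hide_ref("refs/remotes/origin/master").unwrap();
++    /// for id in walk {
++    ///     println!("not yet upstream: {}", id.unwrap());
++    /// }
++    /// ```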
++    pub fn hide_ref(&mut self, reference: &str) -> Result<(), Error> {
++        let reference = try!(CString::new(reference));
++        unsafe {
++            try_call!(raw::git_revwalk_hide_ref(self.raw, reference));
++        }
++        Ok(())
++    }
++}
++
++impl<'repo> Binding for Revwalk<'repo> {
++    type Raw = *mut raw::git_revwalk;
++    unsafe fn from_raw(raw: *mut raw::git_revwalk) -> Revwalk<'repo> {
++        Revwalk { raw: raw, _marker: marker::PhantomData }
++    }
++    fn raw(&self) -> *mut raw::git_revwalk { self.raw }
++}
++
++impl<'repo> Drop for Revwalk<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_revwalk_free(self.raw) }
++    }
++}
++
++impl<'repo> Iterator for Revwalk<'repo> {
++    type Item = Result<Oid, Error>;
++    fn next(&mut self) -> Option<Result<Oid, Error>> {
++        let mut out: raw::git_oid = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        unsafe {
++            try_call_iter!(raw::git_revwalk_next(&mut out, self.raw()));
++            Some(Ok(Binding::from_raw(&out as *const _)))
++        }
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    #[test]
++    fn smoke() {
++        let (_td, repo) = ::test::repo_init();
++        let head = repo.head().unwrap();
++        let target = head.target().unwrap();
++
++        let mut walk = repo.revwalk().unwrap();
++        walk.push(target).unwrap();
++
++        let oids: Vec<::Oid> = walk.by_ref().collect::<Result<Vec<_>, _>>()
++                                   .unwrap();
++
++        assert_eq!(oids.len(), 1);
++        assert_eq!(oids[0], target);
++
++        walk.reset();
++        walk.push_head().unwrap();
++        assert_eq!(walk.by_ref().count(), 1);
++
++        walk.reset();
++        walk.push_head().unwrap();
++        walk.hide_head().unwrap();
++        assert_eq!(walk.by_ref().count(), 0);
++    }
++}
diff --cc vendor/git2-0.7.5/src/signature.rs
index 000000000,000000000..61e7a8948
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/signature.rs
@@@ -1,0 -1,0 +1,175 @@@
++use std::ffi::CString;
++use std::marker;
++use std::mem;
++use std::ptr;
++use std::str;
++use std::fmt;
++use libc;
++
++use {raw, Error, Time};
++use util::Binding;
++
++/// A Signature is used to indicate authorship of various actions throughout
++/// the library.
++///
++/// Signatures contain a name, email, and timestamp. All fields can be
++/// specified with `new` while the `now` constructor omits the timestamp. The
++/// [`Repository::signature`] method can be used to create a default signature
++/// with name and email values read from the configuration.
++///
++/// [`Repository::signature`]: struct.Repository.html#method.signature
++pub struct Signature<'a> {
++    raw: *mut raw::git_signature,
++    _marker: marker::PhantomData<&'a str>,
++    owned: bool,
++}
++
++impl<'a> Signature<'a> {
++    /// Create a new action signature with a timestamp of 'now'.
++    ///
++    /// See `new` for more information.
++    pub fn now(name: &str, email: &str) -> Result<Signature<'static>, Error> {
++        ::init();
++        let mut ret = ptr::null_mut();
++        let name = try!(CString::new(name));
++        let email = try!(CString::new(email));
++        unsafe {
++            try_call!(raw::git_signature_now(&mut ret, name, email));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Create a new action signature.
++    ///
++    /// The `time` specified is in seconds since the epoch, and the `offset`
++    /// is the time zone offset in minutes.
++    ///
++    /// Returns an error if either `name` or `email` contains angle brackets.
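++    ///
++    /// # Example
++    ///
++    /// A small sketch with made-up values; the offset is +120 minutes (UTC+2):
++    ///
++    /// ```
++    /// use git2::{Signature, Time};
++    ///
++    /// let when = Time::new(1_500_000_000, 120);
++    /// let sig = Signature::new("Jane Doe", "jane@example.com", &when).unwrap();
++    /// assert_eq!(sig.name(), Some("Jane Doe"));
++    /// assert_eq!(sig.when().offset_minutes(), 120);
++    /// ```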
++    pub fn new(name: &str, email: &str, time: &Time)
++               -> Result<Signature<'static>, Error> {
++        ::init();
++        let mut ret = ptr::null_mut();
++        let name = try!(CString::new(name));
++        let email = try!(CString::new(email));
++        unsafe {
++            try_call!(raw::git_signature_new(&mut ret, name, email,
++                                             time.seconds() as raw::git_time_t,
++                                             time.offset_minutes() as libc::c_int));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Gets the name on the signature.
++    ///
++    /// Returns `None` if the name is not valid utf-8.
++    pub fn name(&self) -> Option<&str> {
++        str::from_utf8(self.name_bytes()).ok()
++    }
++
++    /// Gets the name on the signature as a byte slice.
++    pub fn name_bytes(&self) -> &[u8] {
++        unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() }
++    }
++
++    /// Gets the email on the signature.
++    ///
++    /// Returns `None` if the email is not valid utf-8.
++    pub fn email(&self) -> Option<&str> {
++        str::from_utf8(self.email_bytes()).ok()
++    }
++
++    /// Gets the email on the signature as a byte slice.
++    pub fn email_bytes(&self) -> &[u8] {
++        unsafe { ::opt_bytes(self, (*self.raw).email).unwrap() }
++    }
++
++    /// Get the `when` of this signature.
++    pub fn when(&self) -> Time {
++        unsafe { Binding::from_raw((*self.raw).when) }
++    }
++
++    /// Convert a signature of any lifetime into an owned signature with a
++    /// static lifetime.
++    pub fn to_owned(&self) -> Signature<'static> {
++        unsafe {
++            let me = mem::transmute::<&Signature<'a>, &Signature<'static>>(self);
++            me.clone()
++        }
++    }
++}
++
++impl<'a> Binding for Signature<'a> {
++    type Raw = *mut raw::git_signature;
++    unsafe fn from_raw(raw: *mut raw::git_signature) -> Signature<'a> {
++        Signature {
++            raw: raw,
++            _marker: marker::PhantomData,
++            owned: true,
++        }
++    }
++    fn raw(&self) -> *mut raw::git_signature { self.raw }
++}
++
++/// Creates a new signature from the given raw pointer, tied to the lifetime
++/// of the given object.
++///
++/// This function is unsafe as there is no guarantee that `raw` is valid for
++/// `'b` nor if it's a valid pointer.
++pub unsafe fn from_raw_const<'b, T>(_lt: &'b T,
++                                    raw: *const raw::git_signature)
++                                    -> Signature<'b> {
++    Signature {
++        raw: raw as *mut raw::git_signature,
++        _marker: marker::PhantomData,
++        owned: false,
++    }
++}
++
++impl Clone for Signature<'static> {
++    fn clone(&self) -> Signature<'static> {
++        // TODO: can this be defined for 'a and just do a plain old copy if
++        // the lifetime isn't static?
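++        // git_signature_dup makes a deep copy owned by the new value, so the
++        // clone is independent of `self` (and `from_raw` marks it owned).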
++        let mut raw = ptr::null_mut();
++        let rc = unsafe { raw::git_signature_dup(&mut raw, &*self.raw) };
++        assert_eq!(rc, 0);
++        unsafe { Binding::from_raw(raw) }
++    }
++}
++
++impl<'a> Drop for Signature<'a> {
++    fn drop(&mut self) {
++        if self.owned {
++            unsafe { raw::git_signature_free(self.raw) }
++        }
++    }
++}
++
++impl<'a> fmt::Display for Signature<'a> {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        write!(f, "{} <{}>",
++               String::from_utf8_lossy(self.name_bytes()),
++               String::from_utf8_lossy(self.email_bytes()))
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use {Signature, Time};
++
++    #[test]
++    fn smoke() {
++        Signature::new("foo", "bar", &Time::new(89, 0)).unwrap();
++        Signature::now("foo", "bar").unwrap();
++        assert!(Signature::new("<foo>", "bar", &Time::new(89, 0)).is_err());
++        assert!(Signature::now("<foo>", "bar").is_err());
++
++        let s = Signature::now("foo", "bar").unwrap();
++        assert_eq!(s.name(), Some("foo"));
++        assert_eq!(s.email(), Some("bar"));
++
++        drop(s.clone());
++        drop(s.to_owned());
++    }
++}
diff --cc vendor/git2-0.7.5/src/stash.rs
index 000000000,000000000..3d07465a3
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/stash.rs
@@@ -1,0 -1,0 +1,210 @@@
++use {raw, panic, Oid, StashApplyProgress};
++use std::ffi::{CStr};
++use util::{Binding};
++use libc::{c_int, c_char, size_t, c_void};
++use build::{CheckoutBuilder};
++use std::mem;
++
++/// Stash application progress notification function.
++///
++/// Return `true` to continue processing, or `false` to
++/// abort the stash application.
++pub type StashApplyProgressCb<'a> = FnMut(StashApplyProgress) -> bool + 'a;
++
++/// This is a callback function you can provide to iterate over all the
++/// stashed states that will be invoked per entry.
++pub type StashCb<'a> = FnMut(usize, &str, &Oid) -> bool + 'a;
++
++#[allow(unused)]
++/// Stash application options structure
++pub struct StashApplyOptions<'cb> {
++    progress: Option<Box<StashApplyProgressCb<'cb>>>,
++    checkout_options: Option<CheckoutBuilder<'cb>>,
++    raw_opts: raw::git_stash_apply_options
++}
++
++impl<'cb> Default for StashApplyOptions<'cb> {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl<'cb> StashApplyOptions<'cb> {
++    /// Creates a default set of stash apply options.
++    pub fn new() -> StashApplyOptions<'cb> {
++        let mut opts = StashApplyOptions {
++            progress: None,
++            checkout_options: None,
++            raw_opts: unsafe { mem::zeroed() },
++        };
++        assert_eq!(unsafe {
++            raw::git_stash_apply_init_options(&mut opts.raw_opts, 1)
++        }, 0);
++        opts
++    }
++
++    /// Set the stash application flag to GIT_STASH_APPLY_REINSTATE_INDEX.
++    pub fn reinstantiate_index(&mut self) -> &mut StashApplyOptions<'cb> {
++        self.raw_opts.flags = raw::GIT_STASH_APPLY_REINSTATE_INDEX;
++        self
++    }
++
++    /// Options to use when writing files to the working directory.
++    pub fn checkout_options(&mut self, opts: CheckoutBuilder<'cb>) -> &mut StashApplyOptions<'cb> {
++        self.checkout_options = Some(opts);
++        self
++    }
++
++    /// Optional callback to notify the consumer of application progress.
++    ///
++    /// Return `true` to continue processing, or `false` to
++    /// abort the stash application.
++    pub fn progress_cb<C>(&mut self, callback: C) -> &mut StashApplyOptions<'cb>
++        where C: FnMut(StashApplyProgress) -> bool + 'cb
++    {
++        self.progress = Some(Box::new(callback) as Box<StashApplyProgressCb<'cb>>);
++        self.raw_opts.progress_cb = stash_apply_progress_cb;
++        self.raw_opts.progress_payload = self as *mut _ as *mut _;
++        self
++    }
++
++    /// Pointer to a raw git_stash_apply_options
++    pub fn raw(&mut self) -> &raw::git_stash_apply_options {
++        unsafe {
++            if let Some(opts) = self.checkout_options.as_mut() {
++                opts.configure(&mut self.raw_opts.checkout_options);
++            }
++        }
++        &self.raw_opts
++    }
++}
++
++#[allow(unused)]
++pub struct StashCbData<'a> {
++    pub callback: &'a mut StashCb<'a>
++}
++
++#[allow(unused)]
++pub extern fn stash_cb(index: size_t,
++                       message: *const c_char,
++                       stash_id: *const raw::git_oid,
++                       payload: *mut c_void)
++                       -> c_int
++{
++    panic::wrap(|| unsafe {
++        let mut data = &mut *(payload as *mut StashCbData);
++        let res = {
++            let mut callback = &mut data.callback;
++            callback(index,
++                     CStr::from_ptr(message).to_str().unwrap(),
++                     &Binding::from_raw(stash_id))
++        };
++
++        if res { 0 } else { 1 }
++    }).unwrap_or(1)
++}
++
++fn convert_progress(progress: raw::git_stash_apply_progress_t) -> StashApplyProgress {
++    match progress {
++        raw::GIT_STASH_APPLY_PROGRESS_NONE => StashApplyProgress::None,
++        raw::GIT_STASH_APPLY_PROGRESS_LOADING_STASH => StashApplyProgress::LoadingStash,
++        raw::GIT_STASH_APPLY_PROGRESS_ANALYZE_INDEX => StashApplyProgress::AnalyzeIndex,
++        raw::GIT_STASH_APPLY_PROGRESS_ANALYZE_MODIFIED => StashApplyProgress::AnalyzeModified,
++        raw::GIT_STASH_APPLY_PROGRESS_ANALYZE_UNTRACKED => StashApplyProgress::AnalyzeUntracked,
++        raw::GIT_STASH_APPLY_PROGRESS_CHECKOUT_UNTRACKED => StashApplyProgress::CheckoutUntracked,
++        raw::GIT_STASH_APPLY_PROGRESS_CHECKOUT_MODIFIED => StashApplyProgress::CheckoutModified,
++        raw::GIT_STASH_APPLY_PROGRESS_DONE => StashApplyProgress::Done,
++
++        _ => StashApplyProgress::None
++    }
++}
++
++#[allow(unused)]
++extern fn stash_apply_progress_cb(progress: raw::git_stash_apply_progress_t,
++                                  payload: *mut c_void)
++                                  -> c_int
++{
++    panic::wrap(|| unsafe {
++        let mut options = &mut *(payload as *mut StashApplyOptions);
++        let res = {
++            let mut callback = options.progress.as_mut().unwrap();
++            callback(convert_progress(progress))
++        };
++
++        if res { 0 } else { -1 }
++    }).unwrap_or(-1)
++}
++
++#[cfg(test)]
++mod tests {
++    use stash::{StashApplyOptions};
++    use std::io::{Write};
++    use std::fs;
++    use std::path::Path;
++    use test::{repo_init};
++    use {Repository, Status, StashFlags};
++
++    fn make_stash<C>(next: C) where C: FnOnce(&mut Repository) {
++        let (_td, mut repo) = repo_init();
++        let signature = repo.signature().unwrap();
++
++        let p = Path::new(repo.workdir().unwrap()).join("file_b.txt");
++        println!("using path {:?}", p);
++        fs::File::create(&p).unwrap()
++            .write("data".as_bytes()).unwrap();
++
++        let rel_p = Path::new("file_b.txt");
++        assert!(repo.status_file(&rel_p).unwrap() == Status::WT_NEW);
++
++        repo.stash_save(&signature, "msg1", Some(StashFlags::INCLUDE_UNTRACKED)).unwrap();
++
++        assert!(repo.status_file(&rel_p).is_err());
++
++        let mut count = 0;
++        repo.stash_foreach(|index, name, _oid| {
++            count += 1;
++            assert!(index == 0);
++            assert!(name == "On master: msg1");
++            true
++        }).unwrap();
++
++        assert!(count == 1);
++        next(&mut repo);
++    }
++
++    fn count_stash(repo: &mut Repository) -> usize {
++        let mut count = 0;
++        repo.stash_foreach(|_, _, _| { count += 1; true }).unwrap();
++        count
++    }
++
++    #[test]
++    fn smoke_stash_save_drop() {
++        make_stash(|repo| {
++            repo.stash_drop(0).unwrap();
++            assert!(count_stash(repo) == 0)
++        })
++    }
++
++    #[test]
++    fn smoke_stash_save_pop() {
++        make_stash(|repo| {
++            repo.stash_pop(0, None).unwrap();
++            assert!(count_stash(repo) == 0)
++        })
++    }
++
++    #[test]
++    fn smoke_stash_save_apply() {
++        make_stash(|repo| {
++            let mut options = StashApplyOptions::new();
++            options.progress_cb(|progress| {
++                println!("{:?}", progress);
++                true
++            });
++
++            repo.stash_apply(0, Some(&mut options)).unwrap();
++            assert!(count_stash(repo) == 1)
++        })
++    }
++}
diff --cc vendor/git2-0.7.5/src/status.rs
index 000000000,000000000..cb67305e4
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/status.rs
@@@ -1,0 -1,0 +1,418 @@@
++use std::ffi::CString;
++use std::ops::Range;
++use std::marker;
++use std::mem;
++use std::str;
++use libc::{c_char, size_t, c_uint};
++
++use {raw, Status, DiffDelta, IntoCString, Repository};
++use util::Binding;
++
++/// Options that can be provided to `repo.statuses()` to control how the
++/// status information is gathered.
++pub struct StatusOptions {
++    raw: raw::git_status_options,
++    pathspec: Vec<CString>,
++    ptrs: Vec<*const c_char>,
++}
++
++/// Enumeration of possible methods of what can be shown through a status
++/// operation.
++#[derive(Copy, Clone)]
++pub enum StatusShow {
++    /// Only gives status based on HEAD to index comparison, not looking at
++    /// working directory changes.
++    Index,
++
++    /// Only gives status based on index to working directory comparison, not
++    /// comparing the index to the HEAD.
++    Workdir,
++
++    /// The default, this roughly matches `git status --porcelain` regarding
++    /// which files are included and in what order.
++    IndexAndWorkdir,
++}
++
++/// A container for a list of status information about a repository.
++///
++/// Each instance appears as if it were a collection, having a length and
++/// allowing indexing, as well as providing an iterator.
++pub struct Statuses<'repo> {
++    raw: *mut raw::git_status_list,
++
++    // Hm, not currently present, but can't hurt?
++    _marker: marker::PhantomData<&'repo Repository>,
++}
++
++/// An iterator over the statuses in a `Statuses` instance.
++pub struct StatusIter<'statuses> {
++    statuses: &'statuses Statuses<'statuses>,
++    range: Range<usize>,
++}
++
++/// A structure representing an entry in the `Statuses` structure.
++///
++/// Instances are created through the `.iter()` method or the `.get()` method.
++pub struct StatusEntry<'statuses> {
++    raw: *const raw::git_status_entry,
++    _marker: marker::PhantomData<&'statuses DiffDelta<'statuses>>,
++}
++
++impl Default for StatusOptions {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl StatusOptions {
++    /// Creates a new blank set of status options.
++    pub fn new() -> StatusOptions {
++        unsafe {
++            let mut raw = mem::zeroed();
++            let r = raw::git_status_init_options(&mut raw,
++                                                 raw::GIT_STATUS_OPTIONS_VERSION);
++            assert_eq!(r, 0);
++            StatusOptions {
++                raw: raw,
++                pathspec: Vec::new(),
++                ptrs: Vec::new(),
++            }
++        }
++    }
++
++    /// Select the files on which to report status.
++    ///
++    /// The default, if unspecified, is to show the index and the working
++    /// directory.
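++    ///
++    /// # Example
++    ///
++    /// A minimal sketch, assuming `/path/to/repo` is an existing repository;
++    /// this restricts the listing to HEAD-to-index (staged) changes:
++    ///
++    /// ```no_run
++    /// use git2::{Repository, StatusOptions, StatusShow};
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// let mut opts = StatusOptions::new();
++    /// opts.show(StatusShow::Index);
++    /// let statuses = repo.statuses(Some(&mut opts)).unwrap();
++    /// println!("{} staged entries", statuses.len());
++    /// ```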
++    pub fn show(&mut self, show: StatusShow) -> &mut StatusOptions {
++        self.raw.show = match show {
++            StatusShow::Index => raw::GIT_STATUS_SHOW_INDEX_ONLY,
++            StatusShow::Workdir => raw::GIT_STATUS_SHOW_WORKDIR_ONLY,
++            StatusShow::IndexAndWorkdir => raw::GIT_STATUS_SHOW_INDEX_AND_WORKDIR,
++        };
++        self
++    }
++
++    /// Add a path pattern to match (using fnmatch-style matching).
++    ///
++    /// If the `disable_pathspec_match` option is given, then this is a
++    /// literal path to match. If this is not called, then there will be no
++    /// patterns to match and the entire directory will be used.
++    pub fn pathspec<T: IntoCString>(&mut self, pathspec: T)
++                                    -> &mut StatusOptions {
++        let s = pathspec.into_c_string().unwrap();
++        self.ptrs.push(s.as_ptr());
++        self.pathspec.push(s);
++        self
++    }
++
++    fn flag(&mut self, flag: raw::git_status_opt_t, val: bool)
++            -> &mut StatusOptions {
++        if val {
++            self.raw.flags |= flag as c_uint;
++        } else {
++            self.raw.flags &= !(flag as c_uint);
++        }
++        self
++    }
++
++    /// Flag whether untracked files will be included.
++    ///
++    /// Untracked files will only be included if the workdir files are
++    /// included in the status "show" option.
++    pub fn include_untracked(&mut self, include: bool) -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNTRACKED, include)
++    }
++
++    /// Flag whether ignored files will be included.
++    ///
++    /// The files will only be included if the workdir files are included
++    /// in the status "show" option.
++    pub fn include_ignored(&mut self, include: bool) -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_INCLUDE_IGNORED, include)
++    }
++
++    /// Flag to include unmodified files.
++    pub fn include_unmodified(&mut self, include: bool) -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNMODIFIED, include)
++    }
++
++    /// Flag that submodules should be skipped.
++    ///
++    /// This only applies if there are no pending typechanges to the submodule
++    /// (either from or to another type).
++    pub fn exclude_submodules(&mut self, exclude: bool) -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_EXCLUDE_SUBMODULES, exclude)
++    }
++
++    /// Flag that all files in untracked directories should be included.
++    ///
++    /// Normally if an entire directory is new then just the top-level
++    /// directory is included (with a trailing slash on the entry name).
++    pub fn recurse_untracked_dirs(&mut self, include: bool)
++                                  -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_RECURSE_UNTRACKED_DIRS, include)
++    }
++
++    /// Indicates that the given paths should be treated as literal paths,
++    /// not patterns.
++    pub fn disable_pathspec_match(&mut self, include: bool)
++                                  -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_DISABLE_PATHSPEC_MATCH, include)
++    }
++
++    /// Indicates that the contents of ignored directories should be included
++    /// in the status.
++    pub fn recurse_ignored_dirs(&mut self, include: bool)
++                                -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_RECURSE_IGNORED_DIRS, include)
++    }
++
++    /// Indicates that rename detection should be run between the HEAD and
++    /// the index.
++    pub fn renames_head_to_index(&mut self, include: bool)
++                                 -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_RENAMES_HEAD_TO_INDEX, include)
++    }
++
++    /// Indicates that rename detection should be run between the index and
++    /// the working directory.
++    pub fn renames_index_to_workdir(&mut self, include: bool)
++                                    -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_RENAMES_INDEX_TO_WORKDIR, include)
++    }
++
++    /// Override the native case sensitivity for the file system and force
++    /// the output to be in case-sensitive order.
++    pub fn sort_case_sensitively(&mut self, include: bool)
++                                 -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_SORT_CASE_SENSITIVELY, include)
++    }
++
++    /// Override the native case sensitivity for the file system and force
++    /// the output to be in case-insensitive order.
++    pub fn sort_case_insensitively(&mut self, include: bool)
++                                   -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_SORT_CASE_INSENSITIVELY, include)
++    }
++
++    /// Indicates that rename detection should include rewritten files.
++    pub fn renames_from_rewrites(&mut self, include: bool)
++                                 -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_RENAMES_FROM_REWRITES, include)
++    }
++
++    /// Bypasses the default status behavior of doing a "soft" index reload.
++    pub fn no_refresh(&mut self, include: bool) -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_NO_REFRESH, include)
++    }
++
++    /// Refresh the stat cache in the index for files that are unchanged but
++    /// have out-of-date stat information in the index.
++    ///
++    /// This will result in less work being done on subsequent calls to
++    /// fetching the status.
++    pub fn update_index(&mut self, include: bool) -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_UPDATE_INDEX, include)
++    }
++
++    // erm...
++    #[allow(missing_docs)]
++    pub fn include_unreadable(&mut self, include: bool) -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNREADABLE, include)
++    }
++
++    // erm...
++    #[allow(missing_docs)]
++    pub fn include_unreadable_as_untracked(&mut self, include: bool)
++                                           -> &mut StatusOptions {
++        self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNREADABLE_AS_UNTRACKED, include)
++    }
++
++    /// Get a pointer to the inner list of status options.
++    ///
++    /// This function is unsafe as the returned structure has interior
++    /// pointers and may no longer be valid if these options continue to be
++    /// mutated.
++    pub unsafe fn raw(&mut self) -> *const raw::git_status_options {
++        self.raw.pathspec.strings = self.ptrs.as_ptr() as *mut _;
++        self.raw.pathspec.count = self.ptrs.len() as size_t;
++        &self.raw
++    }
++}
++
++impl<'repo> Statuses<'repo> {
++    /// Gets a status entry from this list at the specified index.
++    ///
++    /// Returns `None` if the index is out of bounds.
++    pub fn get(&self, index: usize) -> Option<StatusEntry> {
++        unsafe {
++            let p = raw::git_status_byindex(self.raw, index as size_t);
++            Binding::from_raw_opt(p)
++        }
++    }
++
++    /// Gets the count of status entries in this list.
++    ///
++    /// If there are no changes in status (according to the options given
++    /// when the status list was created), this should return 0.
++    pub fn len(&self) -> usize {
++        unsafe { raw::git_status_list_entrycount(self.raw) as usize }
++    }
++
++    /// Return `true` if there is no status entry in this list.
++    pub fn is_empty(&self) -> bool {
++        self.len() == 0
++    }
++
++    /// Returns an iterator over the statuses in this list.
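++    ///
++    /// # Example
++    ///
++    /// A minimal sketch, assuming `/path/to/repo` is an existing repository:
++    ///
++    /// ```no_run
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// let statuses = repo.statuses(None).unwrap();
++    /// for entry in statuses.iter() {
++    ///     println!("{:?}: {:?}", entry.path(), entry.status());
++    /// }
++    /// ```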
++    pub fn iter(&self) -> StatusIter {
++        StatusIter {
++            statuses: self,
++            range: 0..self.len(),
++        }
++    }
++}
++
++impl<'repo> Binding for Statuses<'repo> {
++    type Raw = *mut raw::git_status_list;
++    unsafe fn from_raw(raw: *mut raw::git_status_list) -> Statuses<'repo> {
++        Statuses { raw: raw, _marker: marker::PhantomData }
++    }
++    fn raw(&self) -> *mut raw::git_status_list { self.raw }
++}
++
++impl<'repo> Drop for Statuses<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_status_list_free(self.raw); }
++    }
++}
++
++impl<'a> Iterator for StatusIter<'a> {
++    type Item = StatusEntry<'a>;
++    fn next(&mut self) -> Option<StatusEntry<'a>> {
++        self.range.next().and_then(|i| self.statuses.get(i))
++    }
++    fn size_hint(&self) -> (usize, Option<usize>) { self.range.size_hint() }
++}
++impl<'a> DoubleEndedIterator for StatusIter<'a> {
++    fn next_back(&mut self) -> Option<StatusEntry<'a>> {
++        self.range.next_back().and_then(|i| self.statuses.get(i))
++    }
++}
++impl<'a> ExactSizeIterator for StatusIter<'a> {}
++
++impl<'statuses> StatusEntry<'statuses> {
++    /// Access the bytes for this entry's corresponding pathname.
++    pub fn path_bytes(&self) -> &[u8] {
++        unsafe {
++            if (*self.raw).head_to_index.is_null() {
++                ::opt_bytes(self, (*(*self.raw).index_to_workdir).old_file.path)
++            } else {
++                ::opt_bytes(self, (*(*self.raw).head_to_index).old_file.path)
++            }.unwrap()
++        }
++    }
++
++    /// Access this entry's path name as a string.
++    ///
++    /// Returns `None` if the path is not valid utf-8.
++    pub fn path(&self) -> Option<&str> { str::from_utf8(self.path_bytes()).ok() }
++
++    /// Access the status flags for this file.
++    pub fn status(&self) -> Status {
++        Status::from_bits_truncate(unsafe { (*self.raw).status as u32 })
++    }
++
++    /// Access detailed information about the differences between the file in
++    /// HEAD and the file in the index.
++    pub fn head_to_index(&self) -> Option<DiffDelta<'statuses>> {
++        unsafe {
++            Binding::from_raw_opt((*self.raw).head_to_index)
++        }
++    }
++
++    /// Access detailed information about the differences between the file in
++    /// the index and the file in the working directory.
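++    ///
++    /// # Example
++    ///
++    /// A sketch of inspecting workdir changes, assuming `statuses` was
++    /// obtained from `Repository::statuses` as above:
++    ///
++    /// ```no_run
++    /// # use git2::Repository;
++    /// # let repo = Repository::open("/path/to/repo").unwrap();
++    /// # let statuses = repo.statuses(None).unwrap();
++    /// for entry in statuses.iter() {
++    ///     if let Some(delta) = entry.index_to_workdir() {
++    ///         println!("workdir change: {:?}", delta.new_file().path());
++    ///     }
++    /// }
++    /// ```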
++    pub fn index_to_workdir(&self) -> Option<DiffDelta<'statuses>> {
++        unsafe {
++            Binding::from_raw_opt((*self.raw).index_to_workdir)
++        }
++    }
++}
++
++impl<'statuses> Binding for StatusEntry<'statuses> {
++    type Raw = *const raw::git_status_entry;
++
++    unsafe fn from_raw(raw: *const raw::git_status_entry)
++                       -> StatusEntry<'statuses> {
++        StatusEntry { raw: raw, _marker: marker::PhantomData }
++    }
++    fn raw(&self) -> *const raw::git_status_entry { self.raw }
++}
++
++#[cfg(test)]
++mod tests {
++    use std::fs::File;
++    use std::path::Path;
++    use std::io::prelude::*;
++    use super::StatusOptions;
++
++    #[test]
++    fn smoke() {
++        let (td, repo) = ::test::repo_init();
++        assert_eq!(repo.statuses(None).unwrap().len(), 0);
++        File::create(&td.path().join("foo")).unwrap();
++        let statuses = repo.statuses(None).unwrap();
++        assert_eq!(statuses.iter().count(), 1);
++        let status = statuses.iter().next().unwrap();
++        assert_eq!(status.path(), Some("foo"));
++        assert!(status.status().contains(::Status::WT_NEW));
++        assert!(!status.status().contains(::Status::INDEX_NEW));
++        assert!(status.head_to_index().is_none());
++        let diff = status.index_to_workdir().unwrap();
++        assert_eq!(diff.old_file().path_bytes().unwrap(), b"foo");
++        assert_eq!(diff.new_file().path_bytes().unwrap(), b"foo");
++    }
++
++    #[test]
++    fn filter() {
++        let (td, repo) = ::test::repo_init();
++        t!(File::create(&td.path().join("foo")));
++        t!(File::create(&td.path().join("bar")));
++        let mut opts = StatusOptions::new();
++        opts.include_untracked(true)
++            .pathspec("foo");
++
++        let statuses = t!(repo.statuses(Some(&mut opts)));
++        assert_eq!(statuses.iter().count(), 1);
++        let status = statuses.iter().next().unwrap();
++        assert_eq!(status.path(), Some("foo"));
++    }
++
++    #[test]
++    fn gitignore() {
++        let (td, repo) = ::test::repo_init();
++        t!(t!(File::create(td.path().join(".gitignore"))).write_all(b"foo\n"));
++        assert!(!t!(repo.status_should_ignore(Path::new("bar"))));
++        assert!(t!(repo.status_should_ignore(Path::new("foo"))));
++    }
++
++    #[test]
++    fn status_file() {
++        let (td, repo) = ::test::repo_init();
++        assert!(repo.status_file(Path::new("foo")).is_err());
++        if cfg!(windows) {
++            assert!(repo.status_file(Path::new("bar\\foo.txt")).is_err());
++        }
++        t!(File::create(td.path().join("foo")));
++        if cfg!(windows) {
++            t!(::std::fs::create_dir_all(td.path().join("bar")));
++            t!(File::create(td.path().join("bar").join("foo.txt")));
++        }
++        let status = t!(repo.status_file(Path::new("foo")));
++        assert!(status.contains(::Status::WT_NEW));
++        if cfg!(windows) {
++            let status = t!(repo.status_file(Path::new("bar\\foo.txt")));
++            assert!(status.contains(::Status::WT_NEW));
++        }
++    }
++}
diff --cc vendor/git2-0.7.5/src/string_array.rs
index 000000000,000000000..97af2090e
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/string_array.rs
@@@ -1,0 -1,0 +1,117 @@@
++//! Bindings to libgit2's raw `git_strarray` type
++
++use std::str;
++use std::ops::Range;
++
++use raw;
++use util::Binding;
++
++/// A string array structure used by libgit2
++///
++/// Some APIs return arrays of strings which originate from libgit2. This
++/// wrapper type behaves a little like `Vec<&str>` but does so without copying
++/// the underlying strings until necessary.
++pub struct StringArray {
++    raw: raw::git_strarray,
++}
++
++/// A forward iterator over the strings of an array, cast to `&str`.
++pub struct Iter<'a> {
++    range: Range<usize>,
++    arr: &'a StringArray,
++}
++
++/// A forward iterator over the strings of an array, cast to `&[u8]`.
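++///
++/// # Example
++///
++/// A minimal sketch, assuming `/path/to/repo` is a repository with tags
++/// (`Repository::tag_names` returns a `StringArray`):
++///
++/// ```no_run
++/// use git2::Repository;
++///
++/// let repo = Repository::open("/path/to/repo").unwrap();
++/// let tags = repo.tag_names(None).unwrap();
++/// for name in tags.iter_bytes() {
++///     println!("{}", String::from_utf8_lossy(name));
++/// }
++/// ```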
++pub struct IterBytes<'a> {
++    range: Range<usize>,
++    arr: &'a StringArray,
++}
++
++impl StringArray {
++    /// Returns None if the i'th string is not utf8 or if i is out of bounds.
++    pub fn get(&self, i: usize) -> Option<&str> {
++        self.get_bytes(i).and_then(|s| str::from_utf8(s).ok())
++    }
++
++    /// Returns None if `i` is out of bounds.
++    pub fn get_bytes(&self, i: usize) -> Option<&[u8]> {
++        if i < self.raw.count as usize {
++            unsafe {
++                let ptr = *self.raw.strings.offset(i as isize) as *const _;
++                Some(::opt_bytes(self, ptr).unwrap())
++            }
++        } else {
++            None
++        }
++    }
++
++    /// Returns an iterator over the strings contained within this array.
++    ///
++    /// The iterator yields `Option<&str>` as it is unknown whether the
++    /// contents are utf-8 or not.
++    pub fn iter(&self) -> Iter {
++        Iter { range: 0..self.len(), arr: self }
++    }
++
++    /// Returns an iterator over the strings contained within this array,
++    /// yielding byte slices.
++    pub fn iter_bytes(&self) -> IterBytes {
++        IterBytes { range: 0..self.len(), arr: self }
++    }
++
++    /// Returns the number of strings in this array.
++    pub fn len(&self) -> usize { self.raw.count as usize }
++
++    /// Return `true` if this array is empty.
++    pub fn is_empty(&self) -> bool { self.len() == 0 }
++}
++
++impl Binding for StringArray {
++    type Raw = raw::git_strarray;
++    unsafe fn from_raw(raw: raw::git_strarray) -> StringArray {
++        StringArray { raw: raw }
++    }
++    fn raw(&self) -> raw::git_strarray { self.raw }
++}
++
++impl<'a> IntoIterator for &'a StringArray {
++    type Item = Option<&'a str>;
++    type IntoIter = Iter<'a>;
++    fn into_iter(self) -> Self::IntoIter {
++        self.iter()
++    }
++}
++
++impl<'a> Iterator for Iter<'a> {
++    type Item = Option<&'a str>;
++    fn next(&mut self) -> Option<Option<&'a str>> {
++        self.range.next().map(|i| self.arr.get(i))
++    }
++    fn size_hint(&self) -> (usize, Option<usize>) { self.range.size_hint() }
++}
++impl<'a> DoubleEndedIterator for Iter<'a> {
++    fn next_back(&mut self) -> Option<Option<&'a str>> {
++        self.range.next_back().map(|i| self.arr.get(i))
++    }
++}
++impl<'a> ExactSizeIterator for Iter<'a> {}
++
++impl<'a> Iterator for IterBytes<'a> {
++    type Item = &'a [u8];
++    fn next(&mut self) -> Option<&'a [u8]> {
++        self.range.next().and_then(|i| self.arr.get_bytes(i))
++    }
++    fn size_hint(&self) -> (usize, Option<usize>) { self.range.size_hint() }
++}
++impl<'a> DoubleEndedIterator for IterBytes<'a> {
++    fn next_back(&mut self) -> Option<&'a [u8]> {
++        self.range.next_back().and_then(|i| self.arr.get_bytes(i))
++    }
++}
++impl<'a> ExactSizeIterator for IterBytes<'a> {}
++
++impl Drop for StringArray {
++    fn drop(&mut self) {
++        unsafe { raw::git_strarray_free(&mut self.raw) }
++    }
++}
diff --cc vendor/git2-0.7.5/src/submodule.rs
index 000000000,000000000..9cdb3566f
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/submodule.rs
@@@ -1,0 -1,0 +1,344 @@@
++use std::marker;
++use std::mem;
++use std::ptr;
++use std::str;
++use std::os::raw::c_int;
++use std::path::Path;
++
++use {raw, Oid, Repository, Error, FetchOptions};
++use build::CheckoutBuilder;
++use util::{self, Binding};
++
++/// A structure to represent a git [submodule][1]
++///
++/// [1]: http://git-scm.com/book/en/Git-Tools-Submodules
++pub struct Submodule<'repo> {
++    raw: *mut raw::git_submodule,
++    _marker: marker::PhantomData<&'repo Repository>,
++}
++
++impl<'repo> Submodule<'repo> {
++    /// Get the submodule's branch.
++    ///
++    /// Returns `None` if the branch is not valid utf-8 or if the branch is
++    /// not yet available.
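++    ///
++    /// # Example
++    ///
++    /// A minimal sketch, assuming `/path/to/repo` is a repository that
++    /// contains submodules:
++    ///
++    /// ```no_run
++    /// use git2::Repository;
++    ///
++    /// let repo = Repository::open("/path/to/repo").unwrap();
++    /// for sm in repo.submodules().unwrap() {
++    ///     println!("{:?} tracks branch {:?}", sm.name(), sm.branch());
++    /// }
++    /// ```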
++    pub fn branch(&self) -> Option<&str> {
++        self.branch_bytes().and_then(|s| str::from_utf8(s).ok())
++    }
++
++    /// Get the branch for the submodule.
++    ///
++    /// Returns `None` if the branch is not yet available.
++    pub fn branch_bytes(&self) -> Option<&[u8]> {
++        unsafe {
++            ::opt_bytes(self, raw::git_submodule_branch(self.raw))
++        }
++    }
++
++    /// Get the submodule's url.
++    ///
++    /// Returns `None` if the url is not valid utf-8.
++    pub fn url(&self) -> Option<&str> { str::from_utf8(self.url_bytes()).ok() }
++
++    /// Get the url for the submodule.
++    pub fn url_bytes(&self) -> &[u8] {
++        unsafe {
++            ::opt_bytes(self, raw::git_submodule_url(self.raw)).unwrap()
++        }
++    }
++
++    /// Get the submodule's name.
++    ///
++    /// Returns `None` if the name is not valid utf-8.
++    pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() }
++
++    /// Get the name for the submodule.
++    pub fn name_bytes(&self) -> &[u8] {
++        unsafe {
++            ::opt_bytes(self, raw::git_submodule_name(self.raw)).unwrap()
++        }
++    }
++
++    /// Get the path for the submodule.
++    pub fn path(&self) -> &Path {
++        util::bytes2path(unsafe {
++            ::opt_bytes(self, raw::git_submodule_path(self.raw)).unwrap()
++        })
++    }
++
++    /// Get the OID for the submodule in the current HEAD tree.
++    pub fn head_id(&self) -> Option<Oid> {
++        unsafe {
++            Binding::from_raw_opt(raw::git_submodule_head_id(self.raw))
++        }
++    }
++
++    /// Get the OID for the submodule in the index.
++    pub fn index_id(&self) -> Option<Oid> {
++        unsafe {
++            Binding::from_raw_opt(raw::git_submodule_index_id(self.raw))
++        }
++    }
++
++    /// Get the OID for the submodule in the current working directory.
++    ///
++    /// This returns the OID that corresponds to looking up 'HEAD' in the
++    /// checked out submodule. If there are pending changes in the index or
++    /// anything else, this won't notice that.
++    pub fn workdir_id(&self) -> Option<Oid> {
++        unsafe {
++            Binding::from_raw_opt(raw::git_submodule_wd_id(self.raw))
++        }
++    }
++
++    /// Copy submodule info into ".git/config" file.
++    ///
++    /// Just like "git submodule init", this copies information about the
++    /// submodule into ".git/config". You can use the accessor functions
++    /// above to alter the in-memory git_submodule object and control what
++    /// is written to the config, overriding what is in .gitmodules.
++    ///
++    /// By default, existing entries will not be overwritten, but passing
++    /// `true` for `overwrite` forces them to be updated.
++    pub fn init(&mut self, overwrite: bool) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_submodule_init(self.raw, overwrite));
++        }
++        Ok(())
++    }
++
++    /// Open the repository for a submodule.
++    ///
++    /// This will only work if the submodule is checked out into the working
++    /// directory.
++    pub fn open(&self) -> Result<Repository, Error> {
++        let mut raw = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_submodule_open(&mut raw, self.raw));
++            Ok(Binding::from_raw(raw))
++        }
++    }
++
++    /// Reread submodule info from config, index, and HEAD.
++    ///
++    /// Call this to reread cached submodule information for this submodule
++    /// if you have reason to believe that it has changed.
++    ///
++    /// If `force` is `true`, then data will be reloaded even if it doesn't
++    /// seem out of date.
++    pub fn reload(&mut self, force: bool) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_submodule_reload(self.raw, force));
++        }
++        Ok(())
++    }
++
++    /// Copy submodule remote info into the submodule repo.
++    ///
++    /// This copies the information about the submodule's URL into the
++    /// checked out submodule config, acting like "git submodule sync". This
++    /// is useful if you have altered the URL for the submodule (or it has
++    /// been altered by a fetch of upstream changes) and you need to update
++    /// your local repo.
++    pub fn sync(&mut self) -> Result<(), Error> {
++        unsafe { try_call!(raw::git_submodule_sync(self.raw)); }
++        Ok(())
++    }
++
++    /// Add current submodule HEAD commit to index of superproject.
++    ///
++    /// If `write_index` is true, then the index file will be immediately
++    /// written. Otherwise you must explicitly call `write()` on an `Index`
++    /// later on.
++    pub fn add_to_index(&mut self, write_index: bool) -> Result<(), Error> {
++        unsafe {
++            try_call!(raw::git_submodule_add_to_index(self.raw, write_index));
++        }
++        Ok(())
++    }
++
++    /// Resolve the setup of a new git submodule.
++    ///
++    /// This should be called on a submodule once you have called add setup
++    /// and done the clone of the submodule. This adds the .gitmodules file
++    /// and the newly cloned submodule to the index to be ready to be
++    /// committed (but doesn't actually do the commit).
++    pub fn add_finalize(&mut self) -> Result<(), Error> {
++        unsafe { try_call!(raw::git_submodule_add_finalize(self.raw)); }
++        Ok(())
++    }
++
++    /// Update submodule.
++    ///
++    /// This will clone a missing submodule and check out the subrepository
++    /// to the commit specified in the index of the containing repository.
++    /// If the submodule repository doesn't contain the target commit, then
++    /// the submodule is fetched using the fetch options supplied in `opts`.
++    ///
++    /// `init` indicates if the submodule should be initialized first if it
++    /// has not been initialized yet.
++    pub fn update(&mut self, init: bool,
++                  opts: Option<&mut SubmoduleUpdateOptions>)
++                  -> Result<(), Error> {
++        unsafe {
++            let mut raw_opts = opts.map(|o| o.raw());
++            try_call!(raw::git_submodule_update(self.raw, init as c_int,
++                raw_opts.as_mut().map_or(ptr::null_mut(), |o| o)));
++        }
++        Ok(())
++    }
++}
++
++impl<'repo> Binding for Submodule<'repo> {
++    type Raw = *mut raw::git_submodule;
++    unsafe fn from_raw(raw: *mut raw::git_submodule) -> Submodule<'repo> {
++        Submodule { raw: raw, _marker: marker::PhantomData }
++    }
++    fn raw(&self) -> *mut raw::git_submodule { self.raw }
++}
++
++impl<'repo> Drop for Submodule<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_submodule_free(self.raw) }
++    }
++}
++
++/// Options to update a submodule.
++pub struct SubmoduleUpdateOptions<'cb> {
++    checkout_builder: CheckoutBuilder<'cb>,
++    fetch_opts: FetchOptions<'cb>,
++    allow_fetch: bool,
++}
++
++impl<'cb> SubmoduleUpdateOptions<'cb> {
++    /// Return default options.
++    pub fn new() -> Self {
++        SubmoduleUpdateOptions {
++            checkout_builder: CheckoutBuilder::new(),
++            fetch_opts: FetchOptions::new(),
++            allow_fetch: true,
++        }
++    }
++
++    unsafe fn raw(&mut self) -> raw::git_submodule_update_options {
++        let mut checkout_opts: raw::git_checkout_options = mem::zeroed();
++        let init_res = raw::git_checkout_init_options(&mut checkout_opts,
++                                                      raw::GIT_CHECKOUT_OPTIONS_VERSION);
++        assert_eq!(0, init_res);
++        self.checkout_builder.configure(&mut checkout_opts);
++        let opts = raw::git_submodule_update_options {
++            version: raw::GIT_SUBMODULE_UPDATE_OPTIONS_VERSION,
++            checkout_opts,
++            fetch_opts: self.fetch_opts.raw(),
++            allow_fetch: self.allow_fetch as c_int,
++        };
++        opts
++    }
++
++    /// Set checkout options.
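++    ///
++    /// # Example
++    ///
++    /// A small sketch of configuring an update that force-checks-out files
++    /// and never touches the network:
++    ///
++    /// ```
++    /// use git2::SubmoduleUpdateOptions;
++    /// use git2::build::CheckoutBuilder;
++    ///
++    /// let mut checkout = CheckoutBuilder::new();
++    /// checkout.force();
++    /// let mut opts = SubmoduleUpdateOptions::new();
++    /// opts.checkout(checkout).allow_fetch(false);
++    /// ```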
++    pub fn checkout(&mut self, opts: CheckoutBuilder<'cb>) -> &mut Self {
++        self.checkout_builder = opts;
++        self
++    }
++
++    /// Set fetch options and allow fetching.
++    pub fn fetch(&mut self, opts: FetchOptions<'cb>) -> &mut Self {
++        self.fetch_opts = opts;
++        self.allow_fetch = true;
++        self
++    }
++
++    /// Allow or disallow fetching.
++    pub fn allow_fetch(&mut self, b: bool) -> &mut Self {
++        self.allow_fetch = b;
++        self
++    }
++}
++
++impl<'cb> Default for SubmoduleUpdateOptions<'cb> {
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use std::path::Path;
++    use std::fs;
++    use tempdir::TempDir;
++    use url::Url;
++
++    use Repository;
++    use SubmoduleUpdateOptions;
++
++    #[test]
++    fn smoke() {
++        let td = TempDir::new("test").unwrap();
++        let repo = Repository::init(td.path()).unwrap();
++        let mut s1 = repo.submodule("/path/to/nowhere",
++                                    Path::new("foo"), true).unwrap();
++        s1.init(false).unwrap();
++        s1.sync().unwrap();
++
++        let s2 = repo.submodule("/path/to/nowhere",
++                                Path::new("bar"), true).unwrap();
++        drop((s1, s2));
++
++        let mut submodules = repo.submodules().unwrap();
++        assert_eq!(submodules.len(), 2);
++        let mut s = submodules.remove(0);
++        assert_eq!(s.name(), Some("bar"));
++        assert_eq!(s.url(), Some("/path/to/nowhere"));
++        assert_eq!(s.branch(), None);
++        assert!(s.head_id().is_none());
++        assert!(s.index_id().is_none());
++        assert!(s.workdir_id().is_none());
++
++        repo.find_submodule("bar").unwrap();
++        s.open().unwrap();
++        assert!(s.path() == Path::new("bar"));
++        s.reload(true).unwrap();
++    }
++
++    #[test]
++    fn add_a_submodule() {
++        let (_td, repo1) = ::test::repo_init();
++        let (td, repo2) = ::test::repo_init();
++
++        let url = Url::from_file_path(&repo1.workdir().unwrap()).unwrap();
++        let mut s = repo2.submodule(&url.to_string(), Path::new("bar"),
++                                    true).unwrap();
++        t!(fs::remove_dir_all(td.path().join("bar")));
++        t!(Repository::clone(&url.to_string(),
++                             td.path().join("bar")));
++        t!(s.add_to_index(false));
++        t!(s.add_finalize());
++    }
++
++    #[test]
++    fn update_submodule() {
++        // -----------------------------------
++        // Same as `add_a_submodule()`
++        let (_td, repo1) = ::test::repo_init();
++        let (td, repo2) = ::test::repo_init();
++
++        let url = Url::from_file_path(&repo1.workdir().unwrap()).unwrap();
++        let mut s = repo2.submodule(&url.to_string(), Path::new("bar"),
++                                    true).unwrap();
++        t!(fs::remove_dir_all(td.path().join("bar")));
++        t!(Repository::clone(&url.to_string(),
++                             td.path().join("bar")));
++        t!(s.add_to_index(false));
++        t!(s.add_finalize());
++        // -----------------------------------
++
++        // Attempt to update submodule
++        let submodules = t!(repo1.submodules());
++        for mut submodule in submodules {
++            let mut submodule_options = SubmoduleUpdateOptions::new();
++            let init = true;
++            let opts = Some(&mut submodule_options);
++
++            t!(submodule.update(init, opts));
++        }
++    }
++}
diff --cc vendor/git2-0.7.5/src/tag.rs
index 000000000,000000000..8041f3546
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/tag.rs
@@@ -1,0 -1,0 +1,191 @@@
++use std::marker;
++use std::mem;
++use std::ptr;
++use std::str;
++
++use {raw, signature, Error, Oid, Object, Signature, ObjectType};
++use util::Binding;
++
++/// A structure to represent a git [tag][1]
++///
++/// [1]: http://git-scm.com/book/en/Git-Basics-Tagging
++pub struct Tag<'repo> {
++    raw: *mut raw::git_tag,
++    _marker: marker::PhantomData<Object<'repo>>,
++}
++
++impl<'repo> Tag<'repo> {
++    /// Get the id (SHA1) of a repository tag
++    pub fn id(&self) -> Oid {
++        unsafe { Binding::from_raw(raw::git_tag_id(&*self.raw)) }
++    }
++
++    /// Get the message of a tag
++    ///
++    /// Returns None if there is no message or if it is not valid utf8
++    pub fn message(&self) -> Option<&str> {
++        self.message_bytes().and_then(|s| str::from_utf8(s).ok())
++    }
++
++    /// Get the message of a tag
++    ///
++    /// Returns None if there is no message
++    pub fn message_bytes(&self) -> Option<&[u8]> {
++        unsafe { ::opt_bytes(self, raw::git_tag_message(&*self.raw)) }
++    }
++
++    /// Get the name of a tag
++    ///
++    /// Returns None if it is not valid utf8
++    pub fn name(&self) -> Option<&str> {
++        str::from_utf8(self.name_bytes()).ok()
++    }
++
++    /// Get the name of a tag
++    pub fn name_bytes(&self) -> &[u8] {
++        unsafe { ::opt_bytes(self, raw::git_tag_name(&*self.raw)).unwrap() }
++    }
++
++    /// Recursively peel a tag until a non-tag git_object is found
++    pub fn peel(&self) -> Result<Object<'repo>, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_tag_peel(&mut ret, &*self.raw));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Get the tagger (author) of a tag
++    ///
++    /// If the author is unspecified, then `None` is returned.
++    pub fn tagger(&self) -> Option<Signature> {
++        unsafe {
++            let ptr = raw::git_tag_tagger(&*self.raw);
++            if ptr.is_null() {
++                None
++            } else {
++                Some(signature::from_raw_const(self, ptr))
++            }
++        }
++    }
++
++    /// Get the tagged object of a tag
++    ///
++    /// This method performs a repository lookup for the given object and
++    /// returns it
++    pub fn target(&self) -> Result<Object<'repo>, Error> {
++        let mut ret = ptr::null_mut();
++        unsafe {
++            try_call!(raw::git_tag_target(&mut ret, &*self.raw));
++            Ok(Binding::from_raw(ret))
++        }
++    }
++
++    /// Get the OID of the tagged object of a tag
++    pub fn target_id(&self) -> Oid {
++        unsafe { Binding::from_raw(raw::git_tag_target_id(&*self.raw)) }
++    }
++
++    /// Get the ObjectType of the tagged object of a tag
++    pub fn target_type(&self) -> Option<ObjectType> {
++        unsafe { ObjectType::from_raw(raw::git_tag_target_type(&*self.raw)) }
++    }
++
++    /// Casts this Tag to be usable as an `Object`
++    pub fn as_object(&self) -> &Object<'repo> {
++        unsafe {
++            &*(self as *const _ as *const Object<'repo>)
++        }
++    }
++
++    /// Consumes Tag to be returned as an `Object`
++    pub fn into_object(self) -> Object<'repo> {
++        assert_eq!(mem::size_of_val(&self), mem::size_of::<Object>());
++        unsafe {
++            mem::transmute(self)
++        }
++    }
++}
++
++impl<'repo> ::std::fmt::Debug for Tag<'repo> {
++    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
++        let mut ds = f.debug_struct("Tag");
++        if let Some(name) = self.name() {
++            ds.field("name", &name);
++        }
++        ds.field("id", &self.id());
++        ds.finish()
++    }
++}
++
++impl<'repo> Binding for Tag<'repo> {
++    type Raw = *mut raw::git_tag;
++    unsafe fn from_raw(raw: *mut raw::git_tag) -> Tag<'repo> {
++        Tag { raw: raw, _marker: marker::PhantomData }
++    }
++    fn raw(&self) -> *mut raw::git_tag { self.raw }
++}
++
++impl<'repo> Clone for Tag<'repo> {
++    fn clone(&self) -> Self {
++        self.as_object().clone().into_tag().ok().unwrap()
++    }
++}
++
++impl<'repo> Drop for Tag<'repo> {
++    fn drop(&mut self) {
++        unsafe { raw::git_tag_free(self.raw) }
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    #[test]
++    fn smoke() {
++        let (_td, repo) = ::test::repo_init();
++        let head = repo.head().unwrap();
++        let id = head.target().unwrap();
++        assert!(repo.find_tag(id).is_err());
++
++        let obj = repo.find_object(id, None).unwrap();
++        let sig = repo.signature().unwrap();
++        let tag_id = repo.tag("foo", &obj, &sig, "msg", false).unwrap();
++        let tag = repo.find_tag(tag_id).unwrap();
++        assert_eq!(tag.id(), tag_id);
++
++        let tags = repo.tag_names(None).unwrap();
++        assert_eq!(tags.len(), 1);
++        assert_eq!(tags.get(0), Some("foo"));
++
++        assert_eq!(tag.name(), Some("foo"));
++        assert_eq!(tag.message(), Some("msg"));
++        assert_eq!(tag.peel().unwrap().id(), obj.id());
++        assert_eq!(tag.target_id(), obj.id());
++        assert_eq!(tag.target_type(), Some(::ObjectType::Commit));
++
++        assert_eq!(tag.tagger().unwrap().name(), sig.name());
++        tag.target().unwrap();
++        tag.into_object();
++
++        repo.find_object(tag_id, None).unwrap().as_tag().unwrap();
++        repo.find_object(tag_id, None).unwrap().into_tag().ok().unwrap();
++
++        repo.tag_delete("foo").unwrap();
++    }
++
++    #[test]
++    fn lite() {
++        let (_td, repo) = ::test::repo_init();
++        let head = t!(repo.head());
++        let id = head.target().unwrap();
++        let obj = t!(repo.find_object(id, None));
++        let tag_id = t!(repo.tag_lightweight("foo", &obj, false));
++        assert!(repo.find_tag(tag_id).is_err());
++        assert_eq!(t!(repo.refname_to_id("refs/tags/foo")), id);
++
++        let tags = t!(repo.tag_names(Some("f*")));
++        assert_eq!(tags.len(), 1);
++        let tags = t!(repo.tag_names(Some("b*")));
++        assert_eq!(tags.len(), 0);
++    }
++}
diff --cc vendor/git2-0.7.5/src/test.rs
index 000000000,000000000..4f8813fda
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/test.rs
@@@ -1,0 -1,0 +1,61 @@@
++use std::path::{Path, PathBuf};
++use std::io;
++#[cfg(unix)]
++use std::ptr;
++use tempdir::TempDir;
++use url::Url;
++
++use Repository;
++
++macro_rules! t {
++    ($e:expr) => (match $e {
++        Ok(e) => e,
++        Err(e) => panic!("{} failed with {}", stringify!($e), e),
++    })
++}
++
++pub fn repo_init() -> (TempDir, Repository) {
++    let td = TempDir::new("test").unwrap();
++    let repo = Repository::init(td.path()).unwrap();
++    {
++        let mut config = repo.config().unwrap();
++        config.set_str("user.name", "name").unwrap();
++        config.set_str("user.email", "email").unwrap();
++        let mut index = repo.index().unwrap();
++        let id = index.write_tree().unwrap();
++
++        let tree = repo.find_tree(id).unwrap();
++        let sig = repo.signature().unwrap();
++        repo.commit(Some("HEAD"), &sig, &sig, "initial",
++                    &tree, &[]).unwrap();
++    }
++    (td, repo)
++}
++
++pub fn path2url(path: &Path) -> String {
++    Url::from_file_path(path).unwrap().to_string()
++}
++
++#[cfg(windows)]
++pub fn realpath(original: &Path) -> io::Result<PathBuf> {
++    Ok(original.to_path_buf())
++}
++#[cfg(unix)]
++pub fn realpath(original: &Path) -> io::Result<PathBuf> {
++    use std::ffi::{CStr, OsString, CString};
++    use std::os::unix::prelude::*;
++    use libc::{self, c_char};
++    extern {
++        fn realpath(name: *const c_char, resolved: *mut c_char) -> *mut c_char;
++    }
++    unsafe {
++        let cstr = try!(CString::new(original.as_os_str().as_bytes()));
++        let ptr = realpath(cstr.as_ptr(), ptr::null_mut());
++        if ptr.is_null() {
++            return Err(io::Error::last_os_error())
++        }
++        let bytes = CStr::from_ptr(ptr).to_bytes().to_vec();
++        libc::free(ptr as *mut _);
++        Ok(PathBuf::from(OsString::from_vec(bytes)))
++    }
++}
diff --cc vendor/git2-0.7.5/src/time.rs
index 000000000,000000000..57a5a70a3
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/time.rs
@@@ -1,0 -1,0 +1,100 @@@
++use std::cmp::Ordering;
++
++use libc::{c_int, c_char};
++
++use raw;
++use util::Binding;
++
++/// Time in a signature
++#[derive(Copy, Clone, Eq, PartialEq)]
++pub struct Time {
++    raw: raw::git_time,
++}
++
++/// Time structure used in a git index entry.
++#[derive(Copy, Clone, Eq, PartialEq)]
++pub struct IndexTime {
++    raw: raw::git_index_time,
++}
++
++impl Time {
++    /// Creates a new time structure from its components.
++    pub fn new(time: i64, offset: i32) -> Time {
++        unsafe {
++            Binding::from_raw(raw::git_time {
++                time: time as raw::git_time_t,
++                offset: offset as c_int,
++                sign: if offset < 0 { '-' } else { '+' } as c_char,
++            })
++        }
++    }
++
++    /// Return the time, in seconds, from epoch
++    pub fn seconds(&self) -> i64 { self.raw.time as i64 }
++
++    /// Return the timezone offset, in minutes
++    pub fn offset_minutes(&self) -> i32 { self.raw.offset as i32 }
++
++    /// Return whether the offset was positive or negative. Primarily useful
++    /// in case the offset is specified as a negative zero.
++    pub fn sign(&self) -> char { self.raw.sign as u8 as char }
++}
++
++impl PartialOrd for Time {
++    fn partial_cmp(&self, other: &Time) -> Option<Ordering> {
++        Some(self.cmp(other))
++    }
++}
++
++impl Ord for Time {
++    fn cmp(&self, other: &Time) -> Ordering {
++        (self.raw.time, self.raw.offset).cmp(&(other.raw.time, other.raw.offset))
++    }
++}
++
++impl Binding for Time {
++    type Raw = raw::git_time;
++    unsafe fn from_raw(raw: raw::git_time) -> Time {
++        Time { raw: raw }
++    }
++    fn raw(&self) -> raw::git_time { self.raw }
++}
++
++impl IndexTime {
++    /// Creates a new time structure from its components.
++    pub fn new(seconds: i32, nanoseconds: u32) -> IndexTime {
++        unsafe {
++            Binding::from_raw(raw::git_index_time {
++                seconds: seconds,
++                nanoseconds: nanoseconds,
++            })
++        }
++    }
++
++    /// Returns the number of seconds in the second component of this time.
++    pub fn seconds(&self) -> i32 { self.raw.seconds }
++    /// Returns the nanosecond component of this time.
++    pub fn nanoseconds(&self) -> u32 { self.raw.nanoseconds }
++}
++
++impl Binding for IndexTime {
++    type Raw = raw::git_index_time;
++    unsafe fn from_raw(raw: raw::git_index_time) -> IndexTime {
++        IndexTime { raw: raw }
++    }
++    fn raw(&self) -> raw::git_index_time { self.raw }
++}
++
++impl PartialOrd for IndexTime {
++    fn partial_cmp(&self, other: &IndexTime) -> Option<Ordering> {
++        Some(self.cmp(other))
++    }
++}
++
++impl Ord for IndexTime {
++    fn cmp(&self, other: &IndexTime) -> Ordering {
++        let me = (self.raw.seconds, self.raw.nanoseconds);
++        let other = (other.raw.seconds, other.raw.nanoseconds);
++        me.cmp(&other)
++    }
++}
diff --cc vendor/git2-0.7.5/src/transport.rs
index 000000000,000000000..d34db9f88
new file
--- /dev/null
+++ b/vendor/git2-0.7.5/src/transport.rs
@@@ -1,0 -1,0 +1,326 @@@
++//! Interfaces for adding custom transports to libgit2
++
++use std::ffi::{CStr, CString};
++use std::io::prelude::*;
++use std::io;
++use std::mem;
++use std::slice;
++use std::ptr;
++use std::str;
++use libc::{c_int, c_void, c_uint, c_char, size_t};
++
++use {raw, panic, Error, Remote};
++use util::Binding;
++
++/// A transport is a structure which knows how to transfer data to and from a
++/// remote.
++///
++/// This transport is a representation of the raw transport underneath it,
++/// which is similar to a trait object in Rust.
++#[allow(missing_copy_implementations)]
++pub struct Transport {
++    raw: *mut raw::git_transport,
++    owned: bool,
++}
++
++/// Interface used by smart transports.
++///
++/// The full-fledged definition of transports has to deal with lots of
++/// nitty-gritty details of the git protocol, but "smart transports" largely
++/// only need to deal with read() and write() of data over a channel.
++/// ++/// A smart subtransport is contained within an instance of a smart transport ++/// and is delegated to in order to actually conduct network activity to push or ++/// pull data from a remote. ++pub trait SmartSubtransport: Send + 'static { ++ /// Indicates that this subtransport will be performing the specified action ++ /// on the specified URL. ++ /// ++ /// This function is responsible for making any network connections and ++ /// returns a stream which can be read and written from in order to ++ /// negotiate the git protocol. ++ fn action(&self, url: &str, action: Service) ++ -> Result, Error>; ++ ++ /// Terminates a connection with the remote. ++ /// ++ /// Each subtransport is guaranteed a call to close() between calls to ++ /// action(), except for the following two natural progressions of actions ++ /// against a constant URL. ++ /// ++ /// 1. UploadPackLs -> UploadPack ++ /// 2. ReceivePackLs -> ReceivePack ++ fn close(&self) -> Result<(), Error>; ++} ++ ++/// Actions that a smart transport can ask a subtransport to perform ++#[derive(Copy, Clone)] ++#[allow(missing_docs)] ++pub enum Service { ++ UploadPackLs, ++ UploadPack, ++ ReceivePackLs, ++ ReceivePack, ++} ++ ++/// An instance of a stream over which a smart transport will communicate with a ++/// remote. ++/// ++/// Currently this only requires the standard `Read` and `Write` traits. This ++/// trait also does not need to be implemented manually as long as the `Read` ++/// and `Write` traits are implemented. ++pub trait SmartSubtransportStream: Read + Write + Send + 'static {} ++ ++impl SmartSubtransportStream for T {} ++ ++type TransportFactory = Fn(&Remote) -> Result + Send + Sync + ++ 'static; ++ ++/// Boxed data payload used for registering new transports. ++/// ++/// Currently only contains a field which knows how to create transports. ++struct TransportData { ++ factory: Box, ++} ++ ++/// Instance of a `git_smart_subtransport`, must use `#[repr(C)]` to ensure that ++/// the C fields come first. ++#[repr(C)] ++struct RawSmartSubtransport { ++ raw: raw::git_smart_subtransport, ++ obj: Box, ++} ++ ++/// Instance of a `git_smart_subtransport_stream`, must use `#[repr(C)]` to ++/// ensure that the C fields come first. ++#[repr(C)] ++struct RawSmartSubtransportStream { ++ raw: raw::git_smart_subtransport_stream, ++ obj: Box, ++} ++ ++/// Add a custom transport definition, to be used in addition to the built-in ++/// set of transports that come with libgit2. ++/// ++/// This function is unsafe as it needs to be externally synchronized with calls ++/// to creation of other transports. ++pub unsafe fn register(prefix: &str, factory: F) -> Result<(), Error> ++ where F: Fn(&Remote) -> Result + Send + Sync + 'static ++{ ++ let mut data = Box::new(TransportData { ++ factory: Box::new(factory), ++ }); ++ let prefix = try!(CString::new(prefix)); ++ let datap = (&mut *data) as *mut TransportData as *mut c_void; ++ try_call!(raw::git_transport_register(prefix, ++ transport_factory, ++ datap)); ++ mem::forget(data); ++ Ok(()) ++} ++ ++impl Transport { ++ /// Creates a new transport which will use the "smart" transport protocol ++ /// for transferring data. ++ /// ++ /// A smart transport requires a *subtransport* over which data is actually ++ /// communicated, but this subtransport largely just needs to be able to ++ /// read() and write(). The subtransport provided will be used to make ++ /// connections which can then be read/written from. 
++impl Transport {
++    /// Creates a new transport which will use the "smart" transport protocol
++    /// for transferring data.
++    ///
++    /// A smart transport requires a *subtransport* over which data is
++    /// actually communicated, but this subtransport largely just needs to be
++    /// able to read() and write(). The subtransport provided will be used to
++    /// make connections which can then be read/written from.
++    ///
++    /// The `rpc` argument is `true` if the protocol is stateless, `false`
++    /// otherwise. For example `http://` is stateless but `git://` is not.
++    pub fn smart<S>(remote: &Remote,
++                    rpc: bool,
++                    subtransport: S) -> Result<Transport, Error>
++        where S: SmartSubtransport
++    {
++        let mut ret = ptr::null_mut();
++
++        let mut raw = Box::new(RawSmartSubtransport {
++            raw: raw::git_smart_subtransport {
++                action: subtransport_action,
++                close: subtransport_close,
++                free: subtransport_free,
++            },
++            obj: Box::new(subtransport),
++        });
++        let mut defn = raw::git_smart_subtransport_definition {
++            callback: smart_factory,
++            rpc: rpc as c_uint,
++            param: &mut *raw as *mut _ as *mut _,
++        };
++
++        // Currently there's no way to pass a payload via the
++        // git_smart_subtransport_definition structure, but it's only used as
++        // a configuration for the initial creation of the smart transport
++        // (verified by reading the current code, hopefully it doesn't
++        // change!).
++        //
++        // We, however, need some state (gotta pass in our
++        // `RawSmartSubtransport`). This also means that this block must be
++        // entirely synchronized with a lock (boo!)
++        unsafe {
++            try_call!(raw::git_transport_smart(&mut ret, remote.raw(),
++                                               &mut defn as *mut _ as *mut _));
++            mem::forget(raw); // ownership transferred to `ret`
++        }
++        return Ok(Transport { raw: ret, owned: true });
++
++        extern fn smart_factory(out: *mut *mut raw::git_smart_subtransport,
++                                _owner: *mut raw::git_transport,
++                                ptr: *mut c_void) -> c_int {
++            unsafe {
++                *out = ptr as *mut raw::git_smart_subtransport;
++                0
++            }
++        }
++    }
++}
++
++impl Drop for Transport {
++    fn drop(&mut self) {
++        if self.owned {
++            unsafe {
++                ((*self.raw).free)(self.raw)
++            }
++        }
++    }
++}
++
++// callback used by register() to create new transports
++extern fn transport_factory(out: *mut *mut raw::git_transport,
++                            owner: *mut raw::git_remote,
++                            param: *mut c_void) -> c_int {
++    struct Bomb<'a> { remote: Option<Remote<'a>> }
++    impl<'a> Drop for Bomb<'a> {
++        fn drop(&mut self) {
++            // TODO: maybe a method instead?
++            mem::forget(self.remote.take());
++        }
++    }
++
++    panic::wrap(|| unsafe {
++        let remote = Bomb { remote: Some(Binding::from_raw(owner)) };
++        let data = &mut *(param as *mut TransportData);
++        match (data.factory)(remote.remote.as_ref().unwrap()) {
++            Ok(mut transport) => {
++                *out = transport.raw;
++                transport.owned = false;
++                0
++            }
++            Err(e) => e.raw_code() as c_int,
++        }
++    }).unwrap_or(-1)
++}
++
++// callback used by smart transports to delegate an action to a
++// `SmartSubtransport` trait object.
++extern fn subtransport_action(stream: *mut *mut raw::git_smart_subtransport_stream, ++ raw_transport: *mut raw::git_smart_subtransport, ++ url: *const c_char, ++ action: raw::git_smart_service_t) -> c_int { ++ panic::wrap(|| unsafe { ++ let url = CStr::from_ptr(url).to_bytes(); ++ let url = match str::from_utf8(url).ok() { ++ Some(s) => s, ++ None => return -1, ++ }; ++ let action = match action { ++ raw::GIT_SERVICE_UPLOADPACK_LS => Service::UploadPackLs, ++ raw::GIT_SERVICE_UPLOADPACK => Service::UploadPack, ++ raw::GIT_SERVICE_RECEIVEPACK_LS => Service::ReceivePackLs, ++ raw::GIT_SERVICE_RECEIVEPACK => Service::ReceivePack, ++ n => panic!("unknown action: {}", n), ++ }; ++ let transport = &mut *(raw_transport as *mut RawSmartSubtransport); ++ let obj = match transport.obj.action(url, action) { ++ Ok(s) => s, ++ Err(e) => return e.raw_code() as c_int, ++ }; ++ *stream = mem::transmute(Box::new(RawSmartSubtransportStream { ++ raw: raw::git_smart_subtransport_stream { ++ subtransport: raw_transport, ++ read: stream_read, ++ write: stream_write, ++ free: stream_free, ++ }, ++ obj: obj, ++ })); ++ 0 ++ }).unwrap_or(-1) ++} ++ ++// callback used by smart transports to close a `SmartSubtransport` trait ++// object. ++extern fn subtransport_close(transport: *mut raw::git_smart_subtransport) ++ -> c_int { ++ let ret = panic::wrap(|| unsafe { ++ let transport = &mut *(transport as *mut RawSmartSubtransport); ++ transport.obj.close() ++ }); ++ match ret { ++ Some(Ok(())) => 0, ++ Some(Err(e)) => e.raw_code() as c_int, ++ None => -1, ++ } ++} ++ ++// callback used by smart transports to free a `SmartSubtransport` trait ++// object. ++extern fn subtransport_free(transport: *mut raw::git_smart_subtransport) { ++ let _ = panic::wrap(|| unsafe { ++ mem::transmute::<_, Box>(transport); ++ }); ++} ++ ++// callback used by smart transports to read from a `SmartSubtransportStream` ++// object. ++extern fn stream_read(stream: *mut raw::git_smart_subtransport_stream, ++ buffer: *mut c_char, ++ buf_size: size_t, ++ bytes_read: *mut size_t) -> c_int { ++ let ret = panic::wrap(|| unsafe { ++ let transport = &mut *(stream as *mut RawSmartSubtransportStream); ++ let buf = slice::from_raw_parts_mut(buffer as *mut u8, ++ buf_size as usize); ++ match transport.obj.read(buf) { ++ Ok(n) => { *bytes_read = n as size_t; Ok(n) } ++ e => e, ++ } ++ }); ++ match ret { ++ Some(Ok(_)) => 0, ++ Some(Err(e)) => unsafe { set_err(&e); -2 }, ++ None => -1, ++ } ++} ++ ++// callback used by smart transports to write to a `SmartSubtransportStream` ++// object. ++extern fn stream_write(stream: *mut raw::git_smart_subtransport_stream, ++ buffer: *const c_char, ++ len: size_t) -> c_int { ++ let ret = panic::wrap(|| unsafe { ++ let transport = &mut *(stream as *mut RawSmartSubtransportStream); ++ let buf = slice::from_raw_parts(buffer as *const u8, len as usize); ++ transport.obj.write_all(buf) ++ }); ++ match ret { ++ Some(Ok(())) => 0, ++ Some(Err(e)) => unsafe { set_err(&e); -2 }, ++ None => -1, ++ } ++} ++ ++unsafe fn set_err(e: &io::Error) { ++ let s = CString::new(e.to_string()).unwrap(); ++ raw::giterr_set_str(raw::GITERR_NET as c_int, s.as_ptr()) ++} ++ ++// callback used by smart transports to free a `SmartSubtransportStream` ++// object. 
++extern fn stream_free(stream: *mut raw::git_smart_subtransport_stream) { ++ let _ = panic::wrap(|| unsafe { ++ mem::transmute::<_, Box>(stream); ++ }); ++} diff --cc vendor/git2-0.7.5/src/tree.rs index 000000000,000000000..a2006f3c5 new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/tree.rs @@@ -1,0 -1,0 +1,406 @@@ ++use std::mem; ++use std::cmp::Ordering; ++use std::ffi::CString; ++use std::ops::Range; ++use std::marker; ++use std::path::Path; ++use std::ptr; ++use std::str; ++use libc; ++ ++use {raw, Oid, Repository, Error, Object, ObjectType}; ++use util::{Binding, IntoCString}; ++ ++/// A structure to represent a git [tree][1] ++/// ++/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects ++pub struct Tree<'repo> { ++ raw: *mut raw::git_tree, ++ _marker: marker::PhantomData>, ++} ++ ++/// A structure representing an entry inside of a tree. An entry is borrowed ++/// from a tree. ++pub struct TreeEntry<'tree> { ++ raw: *mut raw::git_tree_entry, ++ owned: bool, ++ _marker: marker::PhantomData<&'tree raw::git_tree_entry>, ++} ++ ++/// An iterator over the entries in a tree. ++pub struct TreeIter<'tree> { ++ range: Range, ++ tree: &'tree Tree<'tree>, ++} ++ ++impl<'repo> Tree<'repo> { ++ /// Get the id (SHA1) of a repository object ++ pub fn id(&self) -> Oid { ++ unsafe { Binding::from_raw(raw::git_tree_id(&*self.raw)) } ++ } ++ ++ /// Get the number of entries listed in this tree. ++ pub fn len(&self) -> usize { ++ unsafe { raw::git_tree_entrycount(&*self.raw) as usize } ++ } ++ ++ /// Return `true` if there is not entry ++ pub fn is_empty(&self) -> bool { ++ self.len() == 0 ++ } ++ ++ /// Returns an iterator over the entries in this tree. ++ pub fn iter(&self) -> TreeIter { ++ TreeIter { range: 0..self.len(), tree: self } ++ } ++ ++ /// Lookup a tree entry by SHA value. ++ pub fn get_id(&self, id: Oid) -> Option { ++ unsafe { ++ let ptr = raw::git_tree_entry_byid(&*self.raw(), &*id.raw()); ++ if ptr.is_null() { ++ None ++ } else { ++ Some(entry_from_raw_const(ptr)) ++ } ++ } ++ } ++ ++ /// Lookup a tree entry by its position in the tree ++ pub fn get(&self, n: usize) -> Option { ++ unsafe { ++ let ptr = raw::git_tree_entry_byindex(&*self.raw(), ++ n as libc::size_t); ++ if ptr.is_null() { ++ None ++ } else { ++ Some(entry_from_raw_const(ptr)) ++ } ++ } ++ } ++ ++ /// Lookup a tree entry by its filename ++ pub fn get_name(&self, filename: &str) -> Option { ++ let filename = CString::new(filename).unwrap(); ++ unsafe { ++ let ptr = call!(raw::git_tree_entry_byname(&*self.raw(), filename)); ++ if ptr.is_null() { ++ None ++ } else { ++ Some(entry_from_raw_const(ptr)) ++ } ++ } ++ } ++ ++ /// Retrieve a tree entry contained in a tree or in any of its subtrees, ++ /// given its relative path. 
++ pub fn get_path(&self, path: &Path) -> Result, Error> { ++ let path = try!(path.into_c_string()); ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_tree_entry_bypath(&mut ret, &*self.raw(), path)); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Casts this Tree to be usable as an `Object` ++ pub fn as_object(&self) -> &Object<'repo> { ++ unsafe { ++ &*(self as *const _ as *const Object<'repo>) ++ } ++ } ++ ++ /// Consumes Commit to be returned as an `Object` ++ pub fn into_object(self) -> Object<'repo> { ++ assert_eq!(mem::size_of_val(&self), mem::size_of::()); ++ unsafe { ++ mem::transmute(self) ++ } ++ } ++} ++ ++impl<'repo> Binding for Tree<'repo> { ++ type Raw = *mut raw::git_tree; ++ ++ unsafe fn from_raw(raw: *mut raw::git_tree) -> Tree<'repo> { ++ Tree { raw: raw, _marker: marker::PhantomData } ++ } ++ fn raw(&self) -> *mut raw::git_tree { self.raw } ++} ++ ++impl<'repo> ::std::fmt::Debug for Tree<'repo> { ++ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { ++ f.debug_struct("Tree").field("id", &self.id()).finish() ++ } ++} ++ ++impl<'repo> Clone for Tree<'repo> { ++ fn clone(&self) -> Self { ++ self.as_object().clone().into_tree().ok().unwrap() ++ } ++} ++ ++impl<'repo> Drop for Tree<'repo> { ++ fn drop(&mut self) { ++ unsafe { raw::git_tree_free(self.raw) } ++ } ++} ++ ++impl<'repo, 'iter> IntoIterator for &'iter Tree<'repo> { ++ type Item = TreeEntry<'iter>; ++ type IntoIter = TreeIter<'iter>; ++ fn into_iter(self) -> Self::IntoIter { ++ self.iter() ++ } ++} ++ ++/// Create a new tree entry from the raw pointer provided. ++/// ++/// The lifetime of the entry is tied to the tree provided and the function ++/// is unsafe because the validity of the pointer cannot be guaranteed. ++pub unsafe fn entry_from_raw_const<'tree>(raw: *const raw::git_tree_entry) ++ -> TreeEntry<'tree> { ++ TreeEntry { ++ raw: raw as *mut raw::git_tree_entry, ++ owned: false, ++ _marker: marker::PhantomData, ++ } ++} ++ ++impl<'tree> TreeEntry<'tree> { ++ /// Get the id of the object pointed by the entry ++ pub fn id(&self) -> Oid { ++ unsafe { Binding::from_raw(raw::git_tree_entry_id(&*self.raw)) } ++ } ++ ++ /// Get the filename of a tree entry ++ /// ++ /// Returns `None` if the name is not valid utf-8 ++ pub fn name(&self) -> Option<&str> { ++ str::from_utf8(self.name_bytes()).ok() ++ } ++ ++ /// Get the filename of a tree entry ++ pub fn name_bytes(&self) -> &[u8] { ++ unsafe { ++ ::opt_bytes(self, raw::git_tree_entry_name(&*self.raw())).unwrap() ++ } ++ } ++ ++ /// Convert a tree entry to the object it points to. ++ pub fn to_object<'a>(&self, repo: &'a Repository) ++ -> Result, Error> { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ try_call!(raw::git_tree_entry_to_object(&mut ret, repo.raw(), ++ &*self.raw())); ++ Ok(Binding::from_raw(ret)) ++ } ++ } ++ ++ /// Get the type of the object pointed by the entry ++ pub fn kind(&self) -> Option { ++ ObjectType::from_raw(unsafe { raw::git_tree_entry_type(&*self.raw) }) ++ } ++ ++ /// Get the UNIX file attributes of a tree entry ++ pub fn filemode(&self) -> i32 { ++ unsafe { raw::git_tree_entry_filemode(&*self.raw) as i32 } ++ } ++ ++ /// Get the raw UNIX file attributes of a tree entry ++ pub fn filemode_raw(&self) -> i32 { ++ unsafe { raw::git_tree_entry_filemode_raw(&*self.raw) as i32 } ++ } ++ ++ /// Convert this entry of any lifetime into an owned signature with a static ++ /// lifetime. ++ /// ++ /// This will use the `Clone::clone` implementation under the hood. 
++ pub fn to_owned(&self) -> TreeEntry<'static> { ++ unsafe { ++ let me = mem::transmute::<&TreeEntry<'tree>, &TreeEntry<'static>>(self); ++ me.clone() ++ } ++ } ++} ++ ++impl<'a> Binding for TreeEntry<'a> { ++ type Raw = *mut raw::git_tree_entry; ++ unsafe fn from_raw(raw: *mut raw::git_tree_entry) -> TreeEntry<'a> { ++ TreeEntry { ++ raw: raw, ++ owned: true, ++ _marker: marker::PhantomData, ++ } ++ } ++ fn raw(&self) -> *mut raw::git_tree_entry { self.raw } ++} ++ ++impl<'a> Clone for TreeEntry<'a> { ++ fn clone(&self) -> TreeEntry<'a> { ++ let mut ret = ptr::null_mut(); ++ unsafe { ++ assert_eq!(raw::git_tree_entry_dup(&mut ret, &*self.raw()), 0); ++ Binding::from_raw(ret) ++ } ++ } ++} ++ ++impl<'a> PartialOrd for TreeEntry<'a> { ++ fn partial_cmp(&self, other: &TreeEntry<'a>) -> Option { ++ Some(self.cmp(other)) ++ } ++} ++impl<'a> Ord for TreeEntry<'a> { ++ fn cmp(&self, other: &TreeEntry<'a>) -> Ordering { ++ match unsafe { raw::git_tree_entry_cmp(&*self.raw(), &*other.raw()) } { ++ 0 => Ordering::Equal, ++ n if n < 0 => Ordering::Less, ++ _ => Ordering::Greater, ++ } ++ } ++} ++ ++impl<'a> PartialEq for TreeEntry<'a> { ++ fn eq(&self, other: &TreeEntry<'a>) -> bool { ++ self.cmp(other) == Ordering::Equal ++ } ++} ++impl<'a> Eq for TreeEntry<'a> {} ++ ++impl<'a> Drop for TreeEntry<'a> { ++ fn drop(&mut self) { ++ if self.owned { ++ unsafe { raw::git_tree_entry_free(self.raw) } ++ } ++ } ++} ++ ++impl<'tree> Iterator for TreeIter<'tree> { ++ type Item = TreeEntry<'tree>; ++ fn next(&mut self) -> Option> { ++ self.range.next().and_then(|i| self.tree.get(i)) ++ } ++ fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } ++} ++impl<'tree> DoubleEndedIterator for TreeIter<'tree> { ++ fn next_back(&mut self) -> Option> { ++ self.range.next_back().and_then(|i| self.tree.get(i)) ++ } ++} ++impl<'tree> ExactSizeIterator for TreeIter<'tree> {} ++ ++#[cfg(test)] ++mod tests { ++ use {Repository,Tree,TreeEntry,ObjectType,Object}; ++ use tempdir::TempDir; ++ use std::fs::File; ++ use std::io::prelude::*; ++ use std::path::Path; ++ ++ pub struct TestTreeIter<'a> { ++ entries: Vec>, ++ repo: &'a Repository, ++ } ++ ++ impl<'a> Iterator for TestTreeIter<'a> { ++ type Item = TreeEntry<'a>; ++ ++ fn next(&mut self) -> Option > { ++ if self.entries.is_empty() { ++ None ++ } else { ++ let entry = self.entries.remove(0); ++ ++ match entry.kind() { ++ Some(ObjectType::Tree) => { ++ let obj: Object<'a> = entry.to_object(self.repo).unwrap(); ++ ++ let tree: &Tree<'a> = obj.as_tree().unwrap(); ++ ++ for entry in tree.iter() { ++ self.entries.push(entry.to_owned()); ++ } ++ } ++ _ => {} ++ } ++ ++ Some(entry) ++ } ++ } ++ } ++ ++ fn tree_iter<'repo>(tree: &Tree<'repo>, repo: &'repo Repository) ++ -> TestTreeIter<'repo> { ++ let mut initial = vec![]; ++ ++ for entry in tree.iter() { ++ initial.push(entry.to_owned()); ++ } ++ ++ TestTreeIter { ++ entries: initial, ++ repo: repo, ++ } ++ } ++ ++ #[test] ++ fn smoke_tree_iter() { ++ let (td, repo) = ::test::repo_init(); ++ ++ setup_repo(&td, &repo); ++ ++ let head = repo.head().unwrap(); ++ let target = head.target().unwrap(); ++ let commit = repo.find_commit(target).unwrap(); ++ ++ let tree = repo.find_tree(commit.tree_id()).unwrap(); ++ assert_eq!(tree.id(), commit.tree_id()); ++ assert_eq!(tree.len(), 1); ++ ++ for entry in tree_iter(&tree, &repo) { ++ println!("iter entry {:?}", entry.name()); ++ } ++ } ++ ++ fn setup_repo(td: &TempDir, repo: &Repository) { ++ let mut index = repo.index().unwrap(); ++ 
File::create(&td.path().join("foo")).unwrap().write_all(b"foo").unwrap();
++        index.add_path(Path::new("foo")).unwrap();
++        let id = index.write_tree().unwrap();
++        let sig = repo.signature().unwrap();
++        let tree = repo.find_tree(id).unwrap();
++        let parent = repo.find_commit(repo.head().unwrap().target()
++                                      .unwrap()).unwrap();
++        repo.commit(Some("HEAD"), &sig, &sig, "another commit",
++                    &tree, &[&parent]).unwrap();
++    }
++
++    #[test]
++    fn smoke() {
++        let (td, repo) = ::test::repo_init();
++
++        setup_repo(&td, &repo);
++
++        let head = repo.head().unwrap();
++        let target = head.target().unwrap();
++        let commit = repo.find_commit(target).unwrap();
++
++        let tree = repo.find_tree(commit.tree_id()).unwrap();
++        assert_eq!(tree.id(), commit.tree_id());
++        assert_eq!(tree.len(), 1);
++        {
++            let e1 = tree.get(0).unwrap();
++            assert!(e1 == tree.get_id(e1.id()).unwrap());
++            assert!(e1 == tree.get_name("foo").unwrap());
++            assert!(e1 == tree.get_path(Path::new("foo")).unwrap());
++            assert_eq!(e1.name(), Some("foo"));
++            e1.to_object(&repo).unwrap();
++        }
++        tree.into_object();
++
++        repo.find_object(commit.tree_id(), None).unwrap().as_tree().unwrap();
++        repo.find_object(commit.tree_id(), None).unwrap().into_tree().ok().unwrap();
++    }
++}
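Editorial aside: the tests above double as usage documentation. For quick reference, a minimal read-only walk of HEAD's tree using only the APIs from this file looks like this. A sketch, not part of the vendored source; it assumes an existing repository with at least one commit in the current directory.

```rust
use git2::Repository;

fn main() {
    let repo = Repository::open(".").unwrap();

    // Resolve HEAD to a commit and load its root tree, as in the tests.
    let target = repo.head().unwrap().target().unwrap();
    let commit = repo.find_commit(target).unwrap();
    let tree = repo.find_tree(commit.tree_id()).unwrap();

    // TreeIter yields borrowed entries; filemode() is the raw UNIX mode.
    for entry in tree.iter() {
        println!("{:06o} {}", entry.filemode(),
                 entry.name().unwrap_or("<non-utf8>"));
    }
}
```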
diff --cc vendor/git2-0.7.5/src/treebuilder.rs
index 000000000,000000000..e8ea1057c
new file mode 100644
--- /dev/null
+++ b/vendor/git2-0.7.5/src/treebuilder.rs
@@@ -1,0 -1,0 +1,199 @@@
++use std::marker;
++use std::ptr;
++
++use libc::{c_int, c_void};
++
++use {panic, raw, tree, Error, Oid, Repository, TreeEntry};
++use util::{Binding, IntoCString};
++
++/// Constructor for in-memory trees
++pub struct TreeBuilder<'repo> {
++    raw: *mut raw::git_treebuilder,
++    _marker: marker::PhantomData<&'repo Repository>,
++}
++
++impl<'repo> TreeBuilder<'repo> {
++    /// Clear all the entries in the builder
++    pub fn clear(&mut self) {
++        unsafe { raw::git_treebuilder_clear(self.raw) }
++    }
++
++    /// Get the number of entries
++    pub fn len(&self) -> usize {
++        unsafe { raw::git_treebuilder_entrycount(self.raw) as usize }
++    }
++
++    /// Return `true` if there is no entry
++    pub fn is_empty(&self) -> bool {
++        self.len() == 0
++    }
++
++    /// Get an entry from the builder by its filename
++    pub fn get<P>(&self, filename: P) -> Result<Option<TreeEntry>, Error>
++        where P: IntoCString
++    {
++        let filename = try!(filename.into_c_string());
++        unsafe {
++            let ret = raw::git_treebuilder_get(self.raw, filename.as_ptr());
++            if ret.is_null() {
++                Ok(None)
++            } else {
++                Ok(Some(tree::entry_from_raw_const(ret)))
++            }
++        }
++    }
++
++    /// Add or update an entry in the builder
++    ///
++    /// No attempt is made to ensure that the provided Oid points to
++    /// an object of a reasonable type (or any object at all).
++    ///
++    /// The mode given must be one of 0o040000, 0o100644, 0o100755, 0o120000
++    /// or 0o160000 currently.
++    pub fn insert<P: IntoCString>(&mut self, filename: P, oid: Oid,
++                                  filemode: i32) -> Result<TreeEntry, Error> {
++        let filename = try!(filename.into_c_string());
++        let filemode = filemode as raw::git_filemode_t;
++
++        let mut ret = ptr::null();
++        unsafe {
++            try_call!(raw::git_treebuilder_insert(&mut ret, self.raw, filename,
++                                                  oid.raw(), filemode));
++            Ok(tree::entry_from_raw_const(ret))
++        }
++    }
++
++    /// Remove an entry from the builder by its filename
++    pub fn remove<P: IntoCString>(&mut self, filename: P) -> Result<(), Error> {
++        let filename = try!(filename.into_c_string());
++        unsafe {
++            try_call!(raw::git_treebuilder_remove(self.raw, filename));
++        }
++        Ok(())
++    }
++
++    /// Selectively remove entries from the tree
++    ///
++    /// Values for which the filter returns `true` will be kept. Note
++    /// that this behavior is different from the libgit2 C interface.
++    pub fn filter<F>(&mut self, mut filter: F)
++        where F: FnMut(&TreeEntry) -> bool
++    {
++        let mut cb: &mut FilterCb = &mut filter;
++        let ptr = &mut cb as *mut _;
++        unsafe {
++            raw::git_treebuilder_filter(self.raw, filter_cb, ptr as *mut _);
++            panic::check();
++        }
++    }
++
++    /// Write the contents of the TreeBuilder as a Tree object and
++    /// return its Oid
++    pub fn write(&self) -> Result<Oid, Error> {
++        let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] };
++        unsafe {
++            try_call!(raw::git_treebuilder_write(&mut raw, self.raw()));
++            Ok(Binding::from_raw(&raw as *const _))
++        }
++    }
++}
++
++type FilterCb<'a> = FnMut(&TreeEntry) -> bool + 'a;
++
++extern fn filter_cb(entry: *const raw::git_tree_entry,
++                    payload: *mut c_void) -> c_int {
++    let ret = panic::wrap(|| unsafe {
++        // There's no way to return early from git_treebuilder_filter.
++ if panic::panicked() { ++ true ++ } else { ++ let entry = tree::entry_from_raw_const(entry); ++ let payload = payload as *mut &mut FilterCb; ++ (*payload)(&entry) ++ } ++ }); ++ if ret == Some(false) {1} else {0} ++} ++ ++impl<'repo> Binding for TreeBuilder<'repo> { ++ type Raw = *mut raw::git_treebuilder; ++ ++ unsafe fn from_raw(raw: *mut raw::git_treebuilder) -> TreeBuilder<'repo> { ++ TreeBuilder { raw: raw, _marker: marker::PhantomData } ++ } ++ fn raw(&self) -> *mut raw::git_treebuilder { self.raw } ++} ++ ++impl<'repo> Drop for TreeBuilder<'repo> { ++ fn drop(&mut self) { ++ unsafe { raw::git_treebuilder_free(self.raw) } ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use ObjectType; ++ ++ #[test] ++ fn smoke() { ++ let (_td, repo) = ::test::repo_init(); ++ ++ let mut builder = repo.treebuilder(None).unwrap(); ++ assert_eq!(builder.len(), 0); ++ let blob = repo.blob(b"data").unwrap(); ++ { ++ let entry = builder.insert("a", blob, 0o100644).unwrap(); ++ assert_eq!(entry.kind(), Some(ObjectType::Blob)); ++ } ++ builder.insert("b", blob, 0o100644).unwrap(); ++ assert_eq!(builder.len(), 2); ++ builder.remove("a").unwrap(); ++ assert_eq!(builder.len(), 1); ++ assert_eq!(builder.get("b").unwrap().unwrap().id(), blob); ++ builder.clear(); ++ assert_eq!(builder.len(), 0); ++ } ++ ++ #[test] ++ fn write() { ++ let (_td, repo) = ::test::repo_init(); ++ ++ let mut builder = repo.treebuilder(None).unwrap(); ++ let data = repo.blob(b"data").unwrap(); ++ builder.insert("name", data, 0o100644).unwrap(); ++ let tree = builder.write().unwrap(); ++ let tree = repo.find_tree(tree).unwrap(); ++ let entry = tree.get(0).unwrap(); ++ assert_eq!(entry.name(), Some("name")); ++ let blob = entry.to_object(&repo).unwrap(); ++ let blob = blob.as_blob().unwrap(); ++ assert_eq!(blob.content(), b"data"); ++ ++ let builder = repo.treebuilder(Some(&tree)).unwrap(); ++ assert_eq!(builder.len(), 1); ++ } ++ ++ #[test] ++ fn filter() { ++ let (_td, repo) = ::test::repo_init(); ++ ++ let mut builder = repo.treebuilder(None).unwrap(); ++ let blob = repo.blob(b"data").unwrap(); ++ let tree = { ++ let head = repo.head().unwrap() ++ .peel(ObjectType::Commit).unwrap(); ++ let head = head.as_commit().unwrap(); ++ head.tree_id() ++ }; ++ builder.insert("blob", blob, 0o100644).unwrap(); ++ builder.insert("dir", tree, 0o040000).unwrap(); ++ builder.insert("dir2", tree, 0o040000).unwrap(); ++ ++ builder.filter(|_| true); ++ assert_eq!(builder.len(), 3); ++ builder.filter(|e| e.kind().unwrap() != ObjectType::Blob); ++ assert_eq!(builder.len(), 2); ++ builder.filter(|_| false); ++ assert_eq!(builder.len(), 0); ++ } ++} diff --cc vendor/git2-0.7.5/src/util.rs index 000000000,000000000..e111628ea new file mode 100644 --- /dev/null +++ b/vendor/git2-0.7.5/src/util.rs @@@ -1,0 -1,0 +1,152 @@@ ++use std::ffi::{CString, OsStr, OsString}; ++use std::iter::IntoIterator; ++use std::path::{Path, PathBuf}; ++use libc::{c_char, size_t}; ++ ++use {raw, Error}; ++ ++#[doc(hidden)] ++pub trait IsNull { ++ fn is_ptr_null(&self) -> bool; ++} ++impl IsNull for *const T { ++ fn is_ptr_null(&self) -> bool { ++ self.is_null() ++ } ++} ++impl IsNull for *mut T { ++ fn is_ptr_null(&self) -> bool { ++ self.is_null() ++ } ++} ++ ++#[doc(hidden)] ++pub trait Binding: Sized { ++ type Raw; ++ ++ unsafe fn from_raw(raw: Self::Raw) -> Self; ++ fn raw(&self) -> Self::Raw; ++ ++ unsafe fn from_raw_opt(raw: T) -> Option ++ where T: Copy + IsNull, Self: Binding ++ { ++ if raw.is_ptr_null() { ++ None ++ } else { ++ Some(Binding::from_raw(raw)) ++ } ++ } ++} ++ ++pub 
fn iter2cstrs<T, I>(iter: I) -> Result<(Vec<CString>, Vec<*const c_char>,
                                        raw::git_strarray), Error>
    where T: IntoCString, I: IntoIterator<Item=T>
{
    let cstrs: Vec<_> = try!(iter.into_iter().map(|i| i.into_c_string()).collect());
    let ptrs = cstrs.iter().map(|i| i.as_ptr()).collect::<Vec<_>>();
    let raw = raw::git_strarray {
        strings: ptrs.as_ptr() as *mut _,
        count: ptrs.len() as size_t,
    };
    Ok((cstrs, ptrs, raw))
}
++
++#[cfg(unix)]
++pub fn bytes2path(b: &[u8]) -> &Path {
++    use std::os::unix::prelude::*;
++    Path::new(OsStr::from_bytes(b))
++}
++#[cfg(windows)]
++pub fn bytes2path(b: &[u8]) -> &Path {
++    use std::str;
++    Path::new(str::from_utf8(b).unwrap())
++}
++
++/// A class of types that can be converted to C strings.
++///
++/// These types are represented internally as byte slices and it is quite rare
++/// for them to contain an interior 0 byte.
++pub trait IntoCString {
++    /// Consume this container, converting it into a CString
++    fn into_c_string(self) -> Result<CString, Error>;
++}
++
++impl<'a, T: IntoCString + Clone> IntoCString for &'a T {
++    fn into_c_string(self) -> Result<CString, Error> {
++        self.clone().into_c_string()
++    }
++}
++
++impl<'a> IntoCString for &'a str {
++    fn into_c_string(self) -> Result<CString, Error> {
++        Ok(try!(CString::new(self)))
++    }
++}
++
++impl IntoCString for String {
++    fn into_c_string(self) -> Result<CString, Error> {
++        Ok(try!(CString::new(self.into_bytes())))
++    }
++}
++
++impl IntoCString for CString {
++    fn into_c_string(self) -> Result<CString, Error> { Ok(self) }
++}
++
++impl<'a> IntoCString for &'a Path {
++    fn into_c_string(self) -> Result<CString, Error> {
++        let s: &OsStr = self.as_ref();
++        s.into_c_string()
++    }
++}
++
++impl IntoCString for PathBuf {
++    fn into_c_string(self) -> Result<CString, Error> {
++        let s: OsString = self.into();
++        s.into_c_string()
++    }
++}
++
++impl<'a> IntoCString for &'a OsStr {
++    fn into_c_string(self) -> Result<CString, Error> {
++        self.to_os_string().into_c_string()
++    }
++}
++
++impl IntoCString for OsString {
++    #[cfg(unix)]
++    fn into_c_string(self) -> Result<CString, Error> {
++        use std::os::unix::prelude::*;
++        let s: &OsStr = self.as_ref();
++        Ok(try!(CString::new(s.as_bytes())))
++    }
++    #[cfg(windows)]
++    fn into_c_string(self) -> Result<CString, Error> {
++        match self.to_str() {
++            Some(s) => s.into_c_string(),
++            None => Err(Error::from_str("only valid unicode paths are accepted \
++                                         on windows")),
++        }
++    }
++}
++
++impl<'a> IntoCString for &'a [u8] {
++    fn into_c_string(self) -> Result<CString, Error> {
++        Ok(try!(CString::new(self)))
++    }
++}
++
++impl IntoCString for Vec<u8> {
++    fn into_c_string(self) -> Result<CString, Error> {
++        Ok(try!(CString::new(self)))
++    }
++}
++
++pub fn into_opt_c_string<S>(opt_s: Option<S>) -> Result<Option<CString>, Error>
++    where S: IntoCString
++{
++    match opt_s {
++        None => Ok(None),
++        Some(s) => Ok(Some(try!(s.into_c_string()))),
++    }
++}
diff --cc vendor/globset-0.4.1/.cargo-checksum.json
index 000000000,000000000..f8ebb011b
new file mode 100644
--- /dev/null
+++ b/vendor/globset-0.4.1/.cargo-checksum.json
@@@ -1,0 -1,0 +1,1 @@@
++{"files":{},"package":"8e49edbcc9c7fc5beb8c0a54e7319ff8bed353a2b55e85811c6281188c2a6c84"}
diff --cc vendor/globset-0.4.1/COPYING
index 000000000,000000000..bb9c20a09
new file mode 100644
--- /dev/null
+++ b/vendor/globset-0.4.1/COPYING
@@@ -1,0 -1,0 +1,3 @@@
++This project is dual-licensed under the Unlicense and MIT licenses.
++
++You may use this code under the terms of either license.
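Editorial aside: the `IntoCString` machinery in `vendor/git2-0.7.5/src/util.rs` above is what lets `git2` APIs accept `&str`, `String`, `&Path`, and byte slices interchangeably. The following standalone sketch mirrors the idea with a simplified trait of the same name; it is not the crate's actual plumbing (which, among other things, handles non-UTF-8 Unix paths via `OsStrExt`).

```rust
use std::ffi::CString;
use std::path::Path;

// A simplified stand-in for git2's IntoCString trait.
trait IntoCString {
    fn into_c_string(self) -> CString;
}

impl<'a> IntoCString for &'a str {
    fn into_c_string(self) -> CString {
        // Fails on interior NUL bytes, which git2 surfaces as an Error.
        CString::new(self).unwrap()
    }
}

impl<'a> IntoCString for &'a Path {
    fn into_c_string(self) -> CString {
        // Sketch only: assumes UTF-8; the real impl uses OsStr bytes on Unix.
        CString::new(self.to_str().expect("utf-8 path")).unwrap()
    }
}

fn main() {
    assert_eq!("src/lib.rs".into_c_string().as_bytes(), b"src/lib.rs");
    assert_eq!(Path::new("src/lib.rs").into_c_string().as_bytes(),
               b"src/lib.rs");
}
```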
diff --cc vendor/globset-0.4.1/Cargo.toml index 000000000,000000000..621585efc new file mode 100644 --- /dev/null +++ b/vendor/globset-0.4.1/Cargo.toml @@@ -1,0 -1,0 +1,46 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "globset" ++version = "0.4.1" ++authors = ["Andrew Gallant "] ++description = "Cross platform single glob and glob set matching. Glob set matching is the\nprocess of matching one or more glob patterns against a single candidate path\nsimultaneously, and returning all of the globs that matched.\n" ++homepage = "https://github.com/BurntSushi/ripgrep/tree/master/globset" ++documentation = "https://docs.rs/globset" ++readme = "README.md" ++keywords = ["regex", "glob", "multiple", "set", "pattern"] ++license = "Unlicense/MIT" ++repository = "https://github.com/BurntSushi/ripgrep/tree/master/globset" ++ ++[lib] ++name = "globset" ++bench = false ++[dependencies.aho-corasick] ++version = "0.6.0" ++ ++[dependencies.fnv] ++version = "1.0" ++ ++[dependencies.log] ++version = "0.4" ++ ++[dependencies.memchr] ++version = "2" ++ ++[dependencies.regex] ++version = "1" ++[dev-dependencies.glob] ++version = "0.2" ++ ++[features] ++simd-accel = [] diff --cc vendor/globset-0.4.1/LICENSE-MIT index 000000000,000000000..3b0a5dc09 new file mode 100644 --- /dev/null +++ b/vendor/globset-0.4.1/LICENSE-MIT @@@ -1,0 -1,0 +1,21 @@@ ++The MIT License (MIT) ++ ++Copyright (c) 2015 Andrew Gallant ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. diff --cc vendor/globset-0.4.1/README.md index 000000000,000000000..f5caf22a0 new file mode 100644 --- /dev/null +++ b/vendor/globset-0.4.1/README.md @@@ -1,0 -1,0 +1,122 @@@ ++globset ++======= ++Cross platform single glob and glob set matching. Glob set matching is the ++process of matching one or more glob patterns against a single candidate path ++simultaneously, and returning all of the globs that matched. 
++ ++[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.png)](https://travis-ci.org/BurntSushi/ripgrep) ++[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep) ++[![](https://img.shields.io/crates/v/globset.svg)](https://crates.io/crates/globset) ++ ++Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org). ++ ++### Documentation ++ ++[https://docs.rs/globset](https://docs.rs/globset) ++ ++### Usage ++ ++Add this to your `Cargo.toml`: ++ ++```toml ++[dependencies] ++globset = "0.3" ++``` ++ ++and this to your crate root: ++ ++```rust ++extern crate globset; ++``` ++ ++### Example: one glob ++ ++This example shows how to match a single glob against a single file path. ++ ++```rust ++use globset::Glob; ++ ++let glob = Glob::new("*.rs")?.compile_matcher(); ++ ++assert!(glob.is_match("foo.rs")); ++assert!(glob.is_match("foo/bar.rs")); ++assert!(!glob.is_match("Cargo.toml")); ++``` ++ ++### Example: configuring a glob matcher ++ ++This example shows how to use a `GlobBuilder` to configure aspects of match ++semantics. In this example, we prevent wildcards from matching path separators. ++ ++```rust ++use globset::GlobBuilder; ++ ++let glob = GlobBuilder::new("*.rs") ++ .literal_separator(true).build()?.compile_matcher(); ++ ++assert!(glob.is_match("foo.rs")); ++assert!(!glob.is_match("foo/bar.rs")); // no longer matches ++assert!(!glob.is_match("Cargo.toml")); ++``` ++ ++### Example: match multiple globs at once ++ ++This example shows how to match multiple glob patterns at once. ++ ++```rust ++use globset::{Glob, GlobSetBuilder}; ++ ++let mut builder = GlobSetBuilder::new(); ++// A GlobBuilder can be used to configure each glob's match semantics ++// independently. ++builder.add(Glob::new("*.rs")?); ++builder.add(Glob::new("src/lib.rs")?); ++builder.add(Glob::new("src/**/foo.rs")?); ++let set = builder.build()?; ++ ++assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]); ++``` ++ ++### Performance ++ ++This crate implements globs by converting them to regular expressions, and ++executing them with the ++[`regex`](https://github.com/rust-lang-nursery/regex) ++crate. ++ ++For single glob matching, performance of this crate should be roughly on par ++with the performance of the ++[`glob`](https://github.com/rust-lang-nursery/glob) ++crate. (`*_regex` correspond to benchmarks for this library while `*_glob` ++correspond to benchmarks for the `glob` library.) ++Optimizations in the `regex` crate may propel this library past `glob`, ++particularly when matching longer paths. ++ ++``` ++test ext_glob ... bench: 425 ns/iter (+/- 21) ++test ext_regex ... bench: 175 ns/iter (+/- 10) ++test long_glob ... bench: 182 ns/iter (+/- 11) ++test long_regex ... bench: 173 ns/iter (+/- 10) ++test short_glob ... bench: 69 ns/iter (+/- 4) ++test short_regex ... bench: 83 ns/iter (+/- 2) ++``` ++ ++The primary performance advantage of this crate is when matching multiple ++globs against a single path. With the `glob` crate, one must match each glob ++synchronously, one after the other. In this crate, many can be matched ++simultaneously. For example: ++ ++``` ++test many_short_glob ... bench: 1,063 ns/iter (+/- 47) ++test many_short_regex_set ... bench: 186 ns/iter (+/- 11) ++``` ++ ++### Comparison with the [`glob`](https://github.com/rust-lang-nursery/glob) crate ++ ++* Supports alternate "or" globs, e.g., `*.{foo,bar}`. ++* Can match non-UTF-8 file paths correctly. 
++* Supports matching multiple globs at once. ++* Doesn't provide a recursive directory iterator of matching file paths, ++ although I believe this crate should grow one eventually. ++* Supports case insensitive and require-literal-separator match options, but ++ **doesn't** support the require-literal-leading-dot option. diff --cc vendor/globset-0.4.1/UNLICENSE index 000000000,000000000..68a49daad new file mode 100644 --- /dev/null +++ b/vendor/globset-0.4.1/UNLICENSE @@@ -1,0 -1,0 +1,24 @@@ ++This is free and unencumbered software released into the public domain. ++ ++Anyone is free to copy, modify, publish, use, compile, sell, or ++distribute this software, either in source code form or as a compiled ++binary, for any purpose, commercial or non-commercial, and by any ++means. ++ ++In jurisdictions that recognize copyright laws, the author or authors ++of this software dedicate any and all copyright interest in the ++software to the public domain. We make this dedication for the benefit ++of the public at large and to the detriment of our heirs and ++successors. We intend this dedication to be an overt act of ++relinquishment in perpetuity of all present and future rights to this ++software under copyright law. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++OTHER DEALINGS IN THE SOFTWARE. ++ ++For more information, please refer to diff --cc vendor/globset-0.4.1/benches/bench.rs index 000000000,000000000..e142ed72e new file mode 100644 --- /dev/null +++ b/vendor/globset-0.4.1/benches/bench.rs @@@ -1,0 -1,0 +1,121 @@@ ++/*! ++This module benchmarks the glob implementation. For benchmarks on the ripgrep ++tool itself, see the benchsuite directory. 
++*/ ++#![feature(test)] ++ ++extern crate glob; ++extern crate globset; ++#[macro_use] ++extern crate lazy_static; ++extern crate regex; ++extern crate test; ++ ++use std::ffi::OsStr; ++use std::path::Path; ++ ++use globset::{Candidate, Glob, GlobMatcher, GlobSet, GlobSetBuilder}; ++ ++const EXT: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt"; ++const EXT_PAT: &'static str = "*.txt"; ++ ++const SHORT: &'static str = "some/needle.txt"; ++const SHORT_PAT: &'static str = "some/**/needle.txt"; ++ ++const LONG: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt"; ++const LONG_PAT: &'static str = "some/**/needle.txt"; ++ ++fn new_glob(pat: &str) -> glob::Pattern { ++ glob::Pattern::new(pat).unwrap() ++} ++ ++fn new_reglob(pat: &str) -> GlobMatcher { ++ Glob::new(pat).unwrap().compile_matcher() ++} ++ ++fn new_reglob_many(pats: &[&str]) -> GlobSet { ++ let mut builder = GlobSetBuilder::new(); ++ for pat in pats { ++ builder.add(Glob::new(pat).unwrap()); ++ } ++ builder.build().unwrap() ++} ++ ++#[bench] ++fn ext_glob(b: &mut test::Bencher) { ++ let pat = new_glob(EXT_PAT); ++ b.iter(|| assert!(pat.matches(EXT))); ++} ++ ++#[bench] ++fn ext_regex(b: &mut test::Bencher) { ++ let set = new_reglob(EXT_PAT); ++ let cand = Candidate::new(EXT); ++ b.iter(|| assert!(set.is_match_candidate(&cand))); ++} ++ ++#[bench] ++fn short_glob(b: &mut test::Bencher) { ++ let pat = new_glob(SHORT_PAT); ++ b.iter(|| assert!(pat.matches(SHORT))); ++} ++ ++#[bench] ++fn short_regex(b: &mut test::Bencher) { ++ let set = new_reglob(SHORT_PAT); ++ let cand = Candidate::new(SHORT); ++ b.iter(|| assert!(set.is_match_candidate(&cand))); ++} ++ ++#[bench] ++fn long_glob(b: &mut test::Bencher) { ++ let pat = new_glob(LONG_PAT); ++ b.iter(|| assert!(pat.matches(LONG))); ++} ++ ++#[bench] ++fn long_regex(b: &mut test::Bencher) { ++ let set = new_reglob(LONG_PAT); ++ let cand = Candidate::new(LONG); ++ b.iter(|| assert!(set.is_match_candidate(&cand))); ++} ++ ++const MANY_SHORT_GLOBS: &'static [&'static str] = &[ ++ // Taken from a random .gitignore on my system. ++ ".*.swp", ++ "tags", ++ "target", ++ "*.lock", ++ "tmp", ++ "*.csv", ++ "*.fst", ++ "*-got", ++ "*.csv.idx", ++ "words", ++ "98m*", ++ "dict", ++ "test", ++ "months", ++]; ++ ++const MANY_SHORT_SEARCH: &'static str = "98m-blah.csv.idx"; ++ ++#[bench] ++fn many_short_glob(b: &mut test::Bencher) { ++ let pats: Vec<_> = MANY_SHORT_GLOBS.iter().map(|&s| new_glob(s)).collect(); ++ b.iter(|| { ++ let mut count = 0; ++ for pat in &pats { ++ if pat.matches(MANY_SHORT_SEARCH) { ++ count += 1; ++ } ++ } ++ assert_eq!(2, count); ++ }) ++} ++ ++#[bench] ++fn many_short_regex_set(b: &mut test::Bencher) { ++ let set = new_reglob_many(MANY_SHORT_GLOBS); ++ b.iter(|| assert_eq!(2, set.matches(MANY_SHORT_SEARCH).iter().count())); ++} diff --cc vendor/globset-0.4.1/src/glob.rs index 000000000,000000000..cbbc7bad3 new file mode 100644 --- /dev/null +++ b/vendor/globset-0.4.1/src/glob.rs @@@ -1,0 -1,0 +1,1452 @@@ ++use std::fmt; ++use std::hash; ++use std::iter; ++use std::ops::{Deref, DerefMut}; ++use std::path::{Path, is_separator}; ++use std::str; ++ ++use regex; ++use regex::bytes::Regex; ++ ++use {Candidate, Error, ErrorKind, new_regex}; ++ ++/// Describes a matching strategy for a particular pattern. ++/// ++/// This provides a way to more quickly determine whether a pattern matches ++/// a particular file path in a way that scales with a large number of ++/// patterns. 
For example, if many patterns are of the form `*.ext`, then it's ++/// possible to test whether any of those patterns matches by looking up a ++/// file path's extension in a hash table. ++#[derive(Clone, Debug, Eq, PartialEq)] ++pub enum MatchStrategy { ++ /// A pattern matches if and only if the entire file path matches this ++ /// literal string. ++ Literal(String), ++ /// A pattern matches if and only if the file path's basename matches this ++ /// literal string. ++ BasenameLiteral(String), ++ /// A pattern matches if and only if the file path's extension matches this ++ /// literal string. ++ Extension(String), ++ /// A pattern matches if and only if this prefix literal is a prefix of the ++ /// candidate file path. ++ Prefix(String), ++ /// A pattern matches if and only if this prefix literal is a prefix of the ++ /// candidate file path. ++ /// ++ /// An exception: if `component` is true, then `suffix` must appear at the ++ /// beginning of a file path or immediately following a `/`. ++ Suffix { ++ /// The actual suffix. ++ suffix: String, ++ /// Whether this must start at the beginning of a path component. ++ component: bool, ++ }, ++ /// A pattern matches only if the given extension matches the file path's ++ /// extension. Note that this is a necessary but NOT sufficient criterion. ++ /// Namely, if the extension matches, then a full regex search is still ++ /// required. ++ RequiredExtension(String), ++ /// A regex needs to be used for matching. ++ Regex, ++} ++ ++impl MatchStrategy { ++ /// Returns a matching strategy for the given pattern. ++ pub fn new(pat: &Glob) -> MatchStrategy { ++ if let Some(lit) = pat.basename_literal() { ++ MatchStrategy::BasenameLiteral(lit) ++ } else if let Some(lit) = pat.literal() { ++ MatchStrategy::Literal(lit) ++ } else if let Some(ext) = pat.ext() { ++ MatchStrategy::Extension(ext) ++ } else if let Some(prefix) = pat.prefix() { ++ MatchStrategy::Prefix(prefix) ++ } else if let Some((suffix, component)) = pat.suffix() { ++ MatchStrategy::Suffix { suffix: suffix, component: component } ++ } else if let Some(ext) = pat.required_ext() { ++ MatchStrategy::RequiredExtension(ext) ++ } else { ++ MatchStrategy::Regex ++ } ++ } ++} ++ ++/// Glob represents a successfully parsed shell glob pattern. ++/// ++/// It cannot be used directly to match file paths, but it can be converted ++/// to a regular expression string or a matcher. ++#[derive(Clone, Debug, Eq)] ++pub struct Glob { ++ glob: String, ++ re: String, ++ opts: GlobOptions, ++ tokens: Tokens, ++} ++ ++impl PartialEq for Glob { ++ fn eq(&self, other: &Glob) -> bool { ++ self.glob == other.glob && self.opts == other.opts ++ } ++} ++ ++impl hash::Hash for Glob { ++ fn hash(&self, state: &mut H) { ++ self.glob.hash(state); ++ self.opts.hash(state); ++ } ++} ++ ++impl fmt::Display for Glob { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ self.glob.fmt(f) ++ } ++} ++ ++/// A matcher for a single pattern. ++#[derive(Clone, Debug)] ++pub struct GlobMatcher { ++ /// The underlying pattern. ++ pat: Glob, ++ /// The pattern, as a compiled regex. ++ re: Regex, ++} ++ ++impl GlobMatcher { ++ /// Tests whether the given path matches this pattern or not. ++ pub fn is_match>(&self, path: P) -> bool { ++ self.is_match_candidate(&Candidate::new(path.as_ref())) ++ } ++ ++ /// Tests whether the given path matches this pattern or not. ++ pub fn is_match_candidate(&self, path: &Candidate) -> bool { ++ self.re.is_match(&path.path) ++ } ++} ++ ++/// A strategic matcher for a single pattern. 
++#[cfg(test)] ++#[derive(Clone, Debug)] ++struct GlobStrategic { ++ /// The match strategy to use. ++ strategy: MatchStrategy, ++ /// The underlying pattern. ++ pat: Glob, ++ /// The pattern, as a compiled regex. ++ re: Regex, ++} ++ ++#[cfg(test)] ++impl GlobStrategic { ++ /// Tests whether the given path matches this pattern or not. ++ fn is_match>(&self, path: P) -> bool { ++ self.is_match_candidate(&Candidate::new(path.as_ref())) ++ } ++ ++ /// Tests whether the given path matches this pattern or not. ++ fn is_match_candidate(&self, candidate: &Candidate) -> bool { ++ let byte_path = &*candidate.path; ++ ++ match self.strategy { ++ MatchStrategy::Literal(ref lit) => lit.as_bytes() == byte_path, ++ MatchStrategy::BasenameLiteral(ref lit) => { ++ lit.as_bytes() == &*candidate.basename ++ } ++ MatchStrategy::Extension(ref ext) => { ++ ext.as_bytes() == &*candidate.ext ++ } ++ MatchStrategy::Prefix(ref pre) => { ++ starts_with(pre.as_bytes(), byte_path) ++ } ++ MatchStrategy::Suffix { ref suffix, component } => { ++ if component && byte_path == &suffix.as_bytes()[1..] { ++ return true; ++ } ++ ends_with(suffix.as_bytes(), byte_path) ++ } ++ MatchStrategy::RequiredExtension(ref ext) => { ++ let ext = ext.as_bytes(); ++ &*candidate.ext == ext && self.re.is_match(byte_path) ++ } ++ MatchStrategy::Regex => self.re.is_match(byte_path), ++ } ++ } ++} ++ ++/// A builder for a pattern. ++/// ++/// This builder enables configuring the match semantics of a pattern. For ++/// example, one can make matching case insensitive. ++/// ++/// The lifetime `'a` refers to the lifetime of the pattern string. ++#[derive(Clone, Debug)] ++pub struct GlobBuilder<'a> { ++ /// The glob pattern to compile. ++ glob: &'a str, ++ /// Options for the pattern. ++ opts: GlobOptions, ++} ++ ++#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] ++struct GlobOptions { ++ /// Whether to match case insensitively. ++ case_insensitive: bool, ++ /// Whether to require a literal separator to match a separator in a file ++ /// path. e.g., when enabled, `*` won't match `/`. ++ literal_separator: bool, ++ /// Whether or not to use `\` to escape special characters. ++ /// e.g., when enabled, `\*` will match a literal `*`. ++ backslash_escape: bool, ++} ++ ++impl GlobOptions { ++ fn default() -> GlobOptions { ++ GlobOptions { ++ case_insensitive: false, ++ literal_separator: false, ++ backslash_escape: !is_separator('\\'), ++ } ++ } ++} ++ ++#[derive(Clone, Debug, Default, Eq, PartialEq)] ++struct Tokens(Vec); ++ ++impl Deref for Tokens { ++ type Target = Vec; ++ fn deref(&self) -> &Vec { &self.0 } ++} ++ ++impl DerefMut for Tokens { ++ fn deref_mut(&mut self) -> &mut Vec { &mut self.0 } ++} ++ ++#[derive(Clone, Debug, Eq, PartialEq)] ++enum Token { ++ Literal(char), ++ Any, ++ ZeroOrMore, ++ RecursivePrefix, ++ RecursiveSuffix, ++ RecursiveZeroOrMore, ++ Class { ++ negated: bool, ++ ranges: Vec<(char, char)>, ++ }, ++ Alternates(Vec), ++} ++ ++impl Glob { ++ /// Builds a new pattern with default options. ++ pub fn new(glob: &str) -> Result { ++ GlobBuilder::new(glob).build() ++ } ++ ++ /// Returns a matcher for this pattern. ++ pub fn compile_matcher(&self) -> GlobMatcher { ++ let re = new_regex(&self.re) ++ .expect("regex compilation shouldn't fail"); ++ GlobMatcher { ++ pat: self.clone(), ++ re: re, ++ } ++ } ++ ++ /// Returns a strategic matcher. ++ /// ++ /// This isn't exposed because it's not clear whether it's actually ++ /// faster than just running a regex for a *single* pattern. 
If it ++ /// is faster, then GlobMatcher should do it automatically. ++ #[cfg(test)] ++ fn compile_strategic_matcher(&self) -> GlobStrategic { ++ let strategy = MatchStrategy::new(self); ++ let re = new_regex(&self.re) ++ .expect("regex compilation shouldn't fail"); ++ GlobStrategic { ++ strategy: strategy, ++ pat: self.clone(), ++ re: re, ++ } ++ } ++ ++ /// Returns the original glob pattern used to build this pattern. ++ pub fn glob(&self) -> &str { ++ &self.glob ++ } ++ ++ /// Returns the regular expression string for this glob. ++ /// ++ /// Note that regular expressions for globs are intended to be matched on ++ /// arbitrary bytes (`&[u8]`) instead of Unicode strings (`&str`). In ++ /// particular, globs are frequently used on file paths, where there is no ++ /// general guarantee that file paths are themselves valid UTF-8. As a ++ /// result, callers will need to ensure that they are using a regex API ++ /// that can match on arbitrary bytes. For example, the ++ /// [`regex`](https://crates.io/regex) ++ /// crate's ++ /// [`Regex`](https://docs.rs/regex/*/regex/struct.Regex.html) ++ /// API is not suitable for this since it matches on `&str`, but its ++ /// [`bytes::Regex`](https://docs.rs/regex/*/regex/bytes/struct.Regex.html) ++ /// API is suitable for this. ++ pub fn regex(&self) -> &str { ++ &self.re ++ } ++ ++ /// Returns the pattern as a literal if and only if the pattern must match ++ /// an entire path exactly. ++ /// ++ /// The basic format of these patterns is `{literal}`. ++ fn literal(&self) -> Option { ++ if self.opts.case_insensitive { ++ return None; ++ } ++ let mut lit = String::new(); ++ for t in &*self.tokens { ++ match *t { ++ Token::Literal(c) => lit.push(c), ++ _ => return None, ++ } ++ } ++ if lit.is_empty() { ++ None ++ } else { ++ Some(lit) ++ } ++ } ++ ++ /// Returns an extension if this pattern matches a file path if and only ++ /// if the file path has the extension returned. ++ /// ++ /// Note that this extension returned differs from the extension that ++ /// std::path::Path::extension returns. Namely, this extension includes ++ /// the '.'. Also, paths like `.rs` are considered to have an extension ++ /// of `.rs`. ++ fn ext(&self) -> Option { ++ if self.opts.case_insensitive { ++ return None; ++ } ++ let start = match self.tokens.get(0) { ++ Some(&Token::RecursivePrefix) => 1, ++ Some(_) => 0, ++ _ => return None, ++ }; ++ match self.tokens.get(start) { ++ Some(&Token::ZeroOrMore) => { ++ // If there was no recursive prefix, then we only permit ++ // `*` if `*` can match a `/`. For example, if `*` can't ++ // match `/`, then `*.c` doesn't match `foo/bar.c`. ++ if start == 0 && self.opts.literal_separator { ++ return None; ++ } ++ } ++ _ => return None, ++ } ++ match self.tokens.get(start + 1) { ++ Some(&Token::Literal('.')) => {} ++ _ => return None, ++ } ++ let mut lit = ".".to_string(); ++ for t in self.tokens[start + 2..].iter() { ++ match *t { ++ Token::Literal('.') | Token::Literal('/') => return None, ++ Token::Literal(c) => lit.push(c), ++ _ => return None, ++ } ++ } ++ if lit.is_empty() { ++ None ++ } else { ++ Some(lit) ++ } ++ } ++ ++ /// This is like `ext`, but returns an extension even if it isn't sufficent ++ /// to imply a match. Namely, if an extension is returned, then it is ++ /// necessary but not sufficient for a match. ++ fn required_ext(&self) -> Option { ++ if self.opts.case_insensitive { ++ return None; ++ } ++ // We don't care at all about the beginning of this pattern. 
All we ++ // need to check for is if it ends with a literal of the form `.ext`. ++ let mut ext: Vec = vec![]; // built in reverse ++ for t in self.tokens.iter().rev() { ++ match *t { ++ Token::Literal('/') => return None, ++ Token::Literal(c) => { ++ ext.push(c); ++ if c == '.' { ++ break; ++ } ++ } ++ _ => return None, ++ } ++ } ++ if ext.last() != Some(&'.') { ++ None ++ } else { ++ ext.reverse(); ++ Some(ext.into_iter().collect()) ++ } ++ } ++ ++ /// Returns a literal prefix of this pattern if the entire pattern matches ++ /// if the literal prefix matches. ++ fn prefix(&self) -> Option { ++ if self.opts.case_insensitive { ++ return None; ++ } ++ let end = match self.tokens.last() { ++ Some(&Token::ZeroOrMore) => { ++ if self.opts.literal_separator { ++ // If a trailing `*` can't match a `/`, then we can't ++ // assume a match of the prefix corresponds to a match ++ // of the overall pattern. e.g., `foo/*` with ++ // `literal_separator` enabled matches `foo/bar` but not ++ // `foo/bar/baz`, even though `foo/bar/baz` has a `foo/` ++ // literal prefix. ++ return None; ++ } ++ self.tokens.len() - 1 ++ } ++ _ => self.tokens.len(), ++ }; ++ let mut lit = String::new(); ++ for t in &self.tokens[0..end] { ++ match *t { ++ Token::Literal(c) => lit.push(c), ++ _ => return None, ++ } ++ } ++ if lit.is_empty() { ++ None ++ } else { ++ Some(lit) ++ } ++ } ++ ++ /// Returns a literal suffix of this pattern if the entire pattern matches ++ /// if the literal suffix matches. ++ /// ++ /// If a literal suffix is returned and it must match either the entire ++ /// file path or be preceded by a `/`, then also return true. This happens ++ /// with a pattern like `**/foo/bar`. Namely, this pattern matches ++ /// `foo/bar` and `baz/foo/bar`, but not `foofoo/bar`. In this case, the ++ /// suffix returned is `/foo/bar` (but should match the entire path ++ /// `foo/bar`). ++ /// ++ /// When this returns true, the suffix literal is guaranteed to start with ++ /// a `/`. ++ fn suffix(&self) -> Option<(String, bool)> { ++ if self.opts.case_insensitive { ++ return None; ++ } ++ let mut lit = String::new(); ++ let (start, entire) = match self.tokens.get(0) { ++ Some(&Token::RecursivePrefix) => { ++ // We only care if this follows a path component if the next ++ // token is a literal. ++ if let Some(&Token::Literal(_)) = self.tokens.get(1) { ++ lit.push('/'); ++ (1, true) ++ } else { ++ (1, false) ++ } ++ } ++ _ => (0, false), ++ }; ++ let start = match self.tokens.get(start) { ++ Some(&Token::ZeroOrMore) => { ++ // If literal_separator is enabled, then a `*` can't ++ // necessarily match everything, so reporting a suffix match ++ // as a match of the pattern would be a false positive. ++ if self.opts.literal_separator { ++ return None; ++ } ++ start + 1 ++ } ++ _ => start, ++ }; ++ for t in &self.tokens[start..] { ++ match *t { ++ Token::Literal(c) => lit.push(c), ++ _ => return None, ++ } ++ } ++ if lit.is_empty() || lit == "/" { ++ None ++ } else { ++ Some((lit, entire)) ++ } ++ } ++ ++ /// If this pattern only needs to inspect the basename of a file path, ++ /// then the tokens corresponding to only the basename match are returned. ++ /// ++ /// For example, given a pattern of `**/*.foo`, only the tokens ++ /// corresponding to `*.foo` are returned. ++ /// ++ /// Note that this will return None if any match of the basename tokens ++ /// doesn't correspond to a match of the entire pattern. 
For example, the ++ /// glob `foo` only matches when a file path has a basename of `foo`, but ++ /// doesn't *always* match when a file path has a basename of `foo`. e.g., ++ /// `foo` doesn't match `abc/foo`. ++ fn basename_tokens(&self) -> Option<&[Token]> { ++ if self.opts.case_insensitive { ++ return None; ++ } ++ let start = match self.tokens.get(0) { ++ Some(&Token::RecursivePrefix) => 1, ++ _ => { ++ // With nothing to gobble up the parent portion of a path, ++ // we can't assume that matching on only the basename is ++ // correct. ++ return None; ++ } ++ }; ++ if self.tokens[start..].is_empty() { ++ return None; ++ } ++ for t in &self.tokens[start..] { ++ match *t { ++ Token::Literal('/') => return None, ++ Token::Literal(_) => {} // OK ++ Token::Any | Token::ZeroOrMore => { ++ if !self.opts.literal_separator { ++ // In this case, `*` and `?` can match a path ++ // separator, which means this could reach outside ++ // the basename. ++ return None; ++ } ++ } ++ Token::RecursivePrefix ++ | Token::RecursiveSuffix ++ | Token::RecursiveZeroOrMore => { ++ return None; ++ } ++ Token::Class{..} | Token::Alternates(..) => { ++ // We *could* be a little smarter here, but either one ++ // of these is going to prevent our literal optimizations ++ // anyway, so give up. ++ return None; ++ } ++ } ++ } ++ Some(&self.tokens[start..]) ++ } ++ ++ /// Returns the pattern as a literal if and only if the pattern exclusively ++ /// matches the basename of a file path *and* is a literal. ++ /// ++ /// The basic format of these patterns is `**/{literal}`, where `{literal}` ++ /// does not contain a path separator. ++ fn basename_literal(&self) -> Option { ++ let tokens = match self.basename_tokens() { ++ None => return None, ++ Some(tokens) => tokens, ++ }; ++ let mut lit = String::new(); ++ for t in tokens { ++ match *t { ++ Token::Literal(c) => lit.push(c), ++ _ => return None, ++ } ++ } ++ Some(lit) ++ } ++} ++ ++impl<'a> GlobBuilder<'a> { ++ /// Create a new builder for the pattern given. ++ /// ++ /// The pattern is not compiled until `build` is called. ++ pub fn new(glob: &'a str) -> GlobBuilder<'a> { ++ GlobBuilder { ++ glob: glob, ++ opts: GlobOptions::default(), ++ } ++ } ++ ++ /// Parses and builds the pattern. ++ pub fn build(&self) -> Result { ++ let mut p = Parser { ++ glob: &self.glob, ++ stack: vec![Tokens::default()], ++ chars: self.glob.chars().peekable(), ++ prev: None, ++ cur: None, ++ opts: &self.opts, ++ }; ++ p.parse()?; ++ if p.stack.is_empty() { ++ Err(Error { ++ glob: Some(self.glob.to_string()), ++ kind: ErrorKind::UnopenedAlternates, ++ }) ++ } else if p.stack.len() > 1 { ++ Err(Error { ++ glob: Some(self.glob.to_string()), ++ kind: ErrorKind::UnclosedAlternates, ++ }) ++ } else { ++ let tokens = p.stack.pop().unwrap(); ++ Ok(Glob { ++ glob: self.glob.to_string(), ++ re: tokens.to_regex_with(&self.opts), ++ opts: self.opts, ++ tokens: tokens, ++ }) ++ } ++ } ++ ++ /// Toggle whether the pattern matches case insensitively or not. ++ /// ++ /// This is disabled by default. ++ pub fn case_insensitive(&mut self, yes: bool) -> &mut GlobBuilder<'a> { ++ self.opts.case_insensitive = yes; ++ self ++ } ++ ++ /// Toggle whether a literal `/` is required to match a path separator. ++ pub fn literal_separator(&mut self, yes: bool) -> &mut GlobBuilder<'a> { ++ self.opts.literal_separator = yes; ++ self ++ } ++ ++ /// When enabled, a back slash (`\`) may be used to escape ++ /// special characters in a glob pattern. 
Additionally, this will ++ /// prevent `\` from being interpreted as a path separator on all ++ /// platforms. ++ /// ++ /// This is enabled by default on platforms where `\` is not a ++ /// path separator and disabled by default on platforms where `\` ++ /// is a path separator. ++ pub fn backslash_escape(&mut self, yes: bool) -> &mut GlobBuilder<'a> { ++ self.opts.backslash_escape = yes; ++ self ++ } ++} ++ ++impl Tokens { ++ /// Convert this pattern to a string that is guaranteed to be a valid ++ /// regular expression and will represent the matching semantics of this ++ /// glob pattern and the options given. ++ fn to_regex_with(&self, options: &GlobOptions) -> String { ++ let mut re = String::new(); ++ re.push_str("(?-u)"); ++ if options.case_insensitive { ++ re.push_str("(?i)"); ++ } ++ re.push('^'); ++ // Special case. If the entire glob is just `**`, then it should match ++ // everything. ++ if self.len() == 1 && self[0] == Token::RecursivePrefix { ++ re.push_str(".*"); ++ re.push('$'); ++ return re; ++ } ++ self.tokens_to_regex(options, &self, &mut re); ++ re.push('$'); ++ re ++ } ++ ++ fn tokens_to_regex( ++ &self, ++ options: &GlobOptions, ++ tokens: &[Token], ++ re: &mut String, ++ ) { ++ for tok in tokens { ++ match *tok { ++ Token::Literal(c) => { ++ re.push_str(&char_to_escaped_literal(c)); ++ } ++ Token::Any => { ++ if options.literal_separator { ++ re.push_str("[^/]"); ++ } else { ++ re.push_str("."); ++ } ++ } ++ Token::ZeroOrMore => { ++ if options.literal_separator { ++ re.push_str("[^/]*"); ++ } else { ++ re.push_str(".*"); ++ } ++ } ++ Token::RecursivePrefix => { ++ re.push_str("(?:/?|.*/)"); ++ } ++ Token::RecursiveSuffix => { ++ re.push_str("(?:/?|/.*)"); ++ } ++ Token::RecursiveZeroOrMore => { ++ re.push_str("(?:/|/.*/)"); ++ } ++ Token::Class { negated, ref ranges } => { ++ re.push('['); ++ if negated { ++ re.push('^'); ++ } ++ for r in ranges { ++ if r.0 == r.1 { ++ // Not strictly necessary, but nicer to look at. ++ re.push_str(&char_to_escaped_literal(r.0)); ++ } else { ++ re.push_str(&char_to_escaped_literal(r.0)); ++ re.push('-'); ++ re.push_str(&char_to_escaped_literal(r.1)); ++ } ++ } ++ re.push(']'); ++ } ++ Token::Alternates(ref patterns) => { ++ let mut parts = vec![]; ++ for pat in patterns { ++ let mut altre = String::new(); ++ self.tokens_to_regex(options, &pat, &mut altre); ++ if !altre.is_empty() { ++ parts.push(altre); ++ } ++ } ++ ++ // It is possible to have an empty set in which case the ++ // resulting alternation '()' would be an error. ++ if !parts.is_empty() { ++ re.push('('); ++ re.push_str(&parts.join("|")); ++ re.push(')'); ++ } ++ } ++ } ++ } ++ } ++} ++ ++/// Convert a Unicode scalar value to an escaped string suitable for use as ++/// a literal in a non-Unicode regex. ++fn char_to_escaped_literal(c: char) -> String { ++ bytes_to_escaped_literal(&c.to_string().into_bytes()) ++} ++ ++/// Converts an arbitrary sequence of bytes to a UTF-8 string. All non-ASCII ++/// code units are converted to their escaped form. 
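++/// For example (cf. the `re12` test below), the UTF-8 bytes of `☃`,
++/// `0xE2 0x98 0x83`, are rendered as the literal `\xe2\x98\x83`, while an
++/// ASCII byte such as `+` is escaped via `regex::escape` to `\+`.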
++fn bytes_to_escaped_literal(bs: &[u8]) -> String {
++    let mut s = String::with_capacity(bs.len());
++    for &b in bs {
++        if b <= 0x7F {
++            s.push_str(&regex::escape(&(b as char).to_string()));
++        } else {
++            s.push_str(&format!("\\x{:02x}", b));
++        }
++    }
++    s
++}
++
++struct Parser<'a> {
++    glob: &'a str,
++    stack: Vec<Tokens>,
++    chars: iter::Peekable<str::Chars<'a>>,
++    prev: Option<char>,
++    cur: Option<char>,
++    opts: &'a GlobOptions,
++}
++
++impl<'a> Parser<'a> {
++    fn error(&self, kind: ErrorKind) -> Error {
++        Error { glob: Some(self.glob.to_string()), kind: kind }
++    }
++
++    fn parse(&mut self) -> Result<(), Error> {
++        while let Some(c) = self.bump() {
++            match c {
++                '?' => self.push_token(Token::Any)?,
++                '*' => self.parse_star()?,
++                '[' => self.parse_class()?,
++                '{' => self.push_alternate()?,
++                '}' => self.pop_alternate()?,
++                ',' => self.parse_comma()?,
++                '\\' => self.parse_backslash()?,
++                c => self.push_token(Token::Literal(c))?,
++            }
++        }
++        Ok(())
++    }
++
++    fn push_alternate(&mut self) -> Result<(), Error> {
++        if self.stack.len() > 1 {
++            return Err(self.error(ErrorKind::NestedAlternates));
++        }
++        Ok(self.stack.push(Tokens::default()))
++    }
++
++    fn pop_alternate(&mut self) -> Result<(), Error> {
++        let mut alts = vec![];
++        while self.stack.len() >= 2 {
++            alts.push(self.stack.pop().unwrap());
++        }
++        self.push_token(Token::Alternates(alts))
++    }
++
++    fn push_token(&mut self, tok: Token) -> Result<(), Error> {
++        if let Some(ref mut pat) = self.stack.last_mut() {
++            return Ok(pat.push(tok));
++        }
++        Err(self.error(ErrorKind::UnopenedAlternates))
++    }
++
++    fn pop_token(&mut self) -> Result<Token, Error> {
++        if let Some(ref mut pat) = self.stack.last_mut() {
++            return Ok(pat.pop().unwrap());
++        }
++        Err(self.error(ErrorKind::UnopenedAlternates))
++    }
++
++    fn have_tokens(&self) -> Result<bool, Error> {
++        match self.stack.last() {
++            None => Err(self.error(ErrorKind::UnopenedAlternates)),
++            Some(ref pat) => Ok(!pat.is_empty()),
++        }
++    }
++
++    fn parse_comma(&mut self) -> Result<(), Error> {
++        // If we aren't inside a group alternation, then don't
++        // treat commas specially. Otherwise, we need to start
++        // a new alternate.
++        if self.stack.len() <= 1 {
++            self.push_token(Token::Literal(','))
++        } else {
++            Ok(self.stack.push(Tokens::default()))
++        }
++    }
++
++    fn parse_backslash(&mut self) -> Result<(), Error> {
++        if self.opts.backslash_escape {
++            match self.bump() {
++                None => Err(self.error(ErrorKind::DanglingEscape)),
++                Some(c) => self.push_token(Token::Literal(c)),
++            }
++        } else if is_separator('\\') {
++            // Normalize all patterns to use / as a separator.
++            self.push_token(Token::Literal('/'))
++        } else {
++            self.push_token(Token::Literal('\\'))
++        }
++    }
++
++    fn parse_star(&mut self) -> Result<(), Error> {
++        let prev = self.prev;
++        if self.chars.peek() != Some(&'*') {
++            self.push_token(Token::ZeroOrMore)?;
++            return Ok(());
++        }
++        assert!(self.bump() == Some('*'));
++        if !self.have_tokens()?
{ ++ self.push_token(Token::RecursivePrefix)?; ++ let next = self.bump(); ++ if !next.map(is_separator).unwrap_or(true) { ++ return Err(self.error(ErrorKind::InvalidRecursive)); ++ } ++ return Ok(()); ++ } ++ self.pop_token()?; ++ if !prev.map(is_separator).unwrap_or(false) { ++ if self.stack.len() <= 1 ++ || (prev != Some(',') && prev != Some('{')) { ++ return Err(self.error(ErrorKind::InvalidRecursive)); ++ } ++ } ++ match self.chars.peek() { ++ None => { ++ assert!(self.bump().is_none()); ++ self.push_token(Token::RecursiveSuffix) ++ } ++ Some(&',') | Some(&'}') if self.stack.len() >= 2 => { ++ self.push_token(Token::RecursiveSuffix) ++ } ++ Some(&c) if is_separator(c) => { ++ assert!(self.bump().map(is_separator).unwrap_or(false)); ++ self.push_token(Token::RecursiveZeroOrMore) ++ } ++ _ => Err(self.error(ErrorKind::InvalidRecursive)), ++ } ++ } ++ ++ fn parse_class(&mut self) -> Result<(), Error> { ++ fn add_to_last_range( ++ glob: &str, ++ r: &mut (char, char), ++ add: char, ++ ) -> Result<(), Error> { ++ r.1 = add; ++ if r.1 < r.0 { ++ Err(Error { ++ glob: Some(glob.to_string()), ++ kind: ErrorKind::InvalidRange(r.0, r.1), ++ }) ++ } else { ++ Ok(()) ++ } ++ } ++ let mut ranges = vec![]; ++ let negated = match self.chars.peek() { ++ Some(&'!') | Some(&'^') => { ++ let bump = self.bump(); ++ assert!(bump == Some('!') || bump == Some('^')); ++ true ++ } ++ _ => false, ++ }; ++ let mut first = true; ++ let mut in_range = false; ++ loop { ++ let c = match self.bump() { ++ Some(c) => c, ++ // The only way to successfully break this loop is to observe ++ // a ']'. ++ None => return Err(self.error(ErrorKind::UnclosedClass)), ++ }; ++ match c { ++ ']' => { ++ if first { ++ ranges.push((']', ']')); ++ } else { ++ break; ++ } ++ } ++ '-' => { ++ if first { ++ ranges.push(('-', '-')); ++ } else if in_range { ++ // invariant: in_range is only set when there is ++ // already at least one character seen. ++ let r = ranges.last_mut().unwrap(); ++ add_to_last_range(&self.glob, r, '-')?; ++ in_range = false; ++ } else { ++ assert!(!ranges.is_empty()); ++ in_range = true; ++ } ++ } ++ c => { ++ if in_range { ++ // invariant: in_range is only set when there is ++ // already at least one character seen. ++ add_to_last_range( ++ &self.glob, ranges.last_mut().unwrap(), c)?; ++ } else { ++ ranges.push((c, c)); ++ } ++ in_range = false; ++ } ++ } ++ first = false; ++ } ++ if in_range { ++ // Means that the last character in the class was a '-', so add ++ // it as a literal. ++ ranges.push(('-', '-')); ++ } ++ self.push_token(Token::Class { ++ negated: negated, ++ ranges: ranges, ++ }) ++ } ++ ++ fn bump(&mut self) -> Option { ++ self.prev = self.cur; ++ self.cur = self.chars.next(); ++ self.cur ++ } ++} ++ ++#[cfg(test)] ++fn starts_with(needle: &[u8], haystack: &[u8]) -> bool { ++ needle.len() <= haystack.len() && needle == &haystack[..needle.len()] ++} ++ ++#[cfg(test)] ++fn ends_with(needle: &[u8], haystack: &[u8]) -> bool { ++ if needle.len() > haystack.len() { ++ return false; ++ } ++ needle == &haystack[haystack.len() - needle.len()..] ++} ++ ++#[cfg(test)] ++mod tests { ++ use {GlobSetBuilder, ErrorKind}; ++ use super::{Glob, GlobBuilder, Token}; ++ use super::Token::*; ++ ++ #[derive(Clone, Copy, Debug, Default)] ++ struct Options { ++ casei: Option, ++ litsep: Option, ++ bsesc: Option, ++ } ++ ++ macro_rules! 
syntax { ++ ($name:ident, $pat:expr, $tokens:expr) => { ++ #[test] ++ fn $name() { ++ let pat = Glob::new($pat).unwrap(); ++ assert_eq!($tokens, pat.tokens.0); ++ } ++ } ++ } ++ ++ macro_rules! syntaxerr { ++ ($name:ident, $pat:expr, $err:expr) => { ++ #[test] ++ fn $name() { ++ let err = Glob::new($pat).unwrap_err(); ++ assert_eq!(&$err, err.kind()); ++ } ++ } ++ } ++ ++ macro_rules! toregex { ++ ($name:ident, $pat:expr, $re:expr) => { ++ toregex!($name, $pat, $re, Options::default()); ++ }; ++ ($name:ident, $pat:expr, $re:expr, $options:expr) => { ++ #[test] ++ fn $name() { ++ let mut builder = GlobBuilder::new($pat); ++ if let Some(casei) = $options.casei { ++ builder.case_insensitive(casei); ++ } ++ if let Some(litsep) = $options.litsep { ++ builder.literal_separator(litsep); ++ } ++ if let Some(bsesc) = $options.bsesc { ++ builder.backslash_escape(bsesc); ++ } ++ let pat = builder.build().unwrap(); ++ assert_eq!(format!("(?-u){}", $re), pat.regex()); ++ } ++ }; ++ } ++ ++ macro_rules! matches { ++ ($name:ident, $pat:expr, $path:expr) => { ++ matches!($name, $pat, $path, Options::default()); ++ }; ++ ($name:ident, $pat:expr, $path:expr, $options:expr) => { ++ #[test] ++ fn $name() { ++ let mut builder = GlobBuilder::new($pat); ++ if let Some(casei) = $options.casei { ++ builder.case_insensitive(casei); ++ } ++ if let Some(litsep) = $options.litsep { ++ builder.literal_separator(litsep); ++ } ++ if let Some(bsesc) = $options.bsesc { ++ builder.backslash_escape(bsesc); ++ } ++ let pat = builder.build().unwrap(); ++ let matcher = pat.compile_matcher(); ++ let strategic = pat.compile_strategic_matcher(); ++ let set = GlobSetBuilder::new().add(pat).build().unwrap(); ++ assert!(matcher.is_match($path)); ++ assert!(strategic.is_match($path)); ++ assert!(set.is_match($path)); ++ } ++ }; ++ } ++ ++ macro_rules! 
nmatches { ++ ($name:ident, $pat:expr, $path:expr) => { ++ nmatches!($name, $pat, $path, Options::default()); ++ }; ++ ($name:ident, $pat:expr, $path:expr, $options:expr) => { ++ #[test] ++ fn $name() { ++ let mut builder = GlobBuilder::new($pat); ++ if let Some(casei) = $options.casei { ++ builder.case_insensitive(casei); ++ } ++ if let Some(litsep) = $options.litsep { ++ builder.literal_separator(litsep); ++ } ++ if let Some(bsesc) = $options.bsesc { ++ builder.backslash_escape(bsesc); ++ } ++ let pat = builder.build().unwrap(); ++ let matcher = pat.compile_matcher(); ++ let strategic = pat.compile_strategic_matcher(); ++ let set = GlobSetBuilder::new().add(pat).build().unwrap(); ++ assert!(!matcher.is_match($path)); ++ assert!(!strategic.is_match($path)); ++ assert!(!set.is_match($path)); ++ } ++ }; ++ } ++ ++ fn s(string: &str) -> String { string.to_string() } ++ ++ fn class(s: char, e: char) -> Token { ++ Class { negated: false, ranges: vec![(s, e)] } ++ } ++ ++ fn classn(s: char, e: char) -> Token { ++ Class { negated: true, ranges: vec![(s, e)] } ++ } ++ ++ fn rclass(ranges: &[(char, char)]) -> Token { ++ Class { negated: false, ranges: ranges.to_vec() } ++ } ++ ++ fn rclassn(ranges: &[(char, char)]) -> Token { ++ Class { negated: true, ranges: ranges.to_vec() } ++ } ++ ++ syntax!(literal1, "a", vec![Literal('a')]); ++ syntax!(literal2, "ab", vec![Literal('a'), Literal('b')]); ++ syntax!(any1, "?", vec![Any]); ++ syntax!(any2, "a?b", vec![Literal('a'), Any, Literal('b')]); ++ syntax!(seq1, "*", vec![ZeroOrMore]); ++ syntax!(seq2, "a*b", vec![Literal('a'), ZeroOrMore, Literal('b')]); ++ syntax!(seq3, "*a*b*", vec![ ++ ZeroOrMore, Literal('a'), ZeroOrMore, Literal('b'), ZeroOrMore, ++ ]); ++ syntax!(rseq1, "**", vec![RecursivePrefix]); ++ syntax!(rseq2, "**/", vec![RecursivePrefix]); ++ syntax!(rseq3, "/**", vec![RecursiveSuffix]); ++ syntax!(rseq4, "/**/", vec![RecursiveZeroOrMore]); ++ syntax!(rseq5, "a/**/b", vec![ ++ Literal('a'), RecursiveZeroOrMore, Literal('b'), ++ ]); ++ syntax!(cls1, "[a]", vec![class('a', 'a')]); ++ syntax!(cls2, "[!a]", vec![classn('a', 'a')]); ++ syntax!(cls3, "[a-z]", vec![class('a', 'z')]); ++ syntax!(cls4, "[!a-z]", vec![classn('a', 'z')]); ++ syntax!(cls5, "[-]", vec![class('-', '-')]); ++ syntax!(cls6, "[]]", vec![class(']', ']')]); ++ syntax!(cls7, "[*]", vec![class('*', '*')]); ++ syntax!(cls8, "[!!]", vec![classn('!', '!')]); ++ syntax!(cls9, "[a-]", vec![rclass(&[('a', 'a'), ('-', '-')])]); ++ syntax!(cls10, "[-a-z]", vec![rclass(&[('-', '-'), ('a', 'z')])]); ++ syntax!(cls11, "[a-z-]", vec![rclass(&[('a', 'z'), ('-', '-')])]); ++ syntax!(cls12, "[-a-z-]", vec![ ++ rclass(&[('-', '-'), ('a', 'z'), ('-', '-')]), ++ ]); ++ syntax!(cls13, "[]-z]", vec![class(']', 'z')]); ++ syntax!(cls14, "[--z]", vec![class('-', 'z')]); ++ syntax!(cls15, "[ --]", vec![class(' ', '-')]); ++ syntax!(cls16, "[0-9a-z]", vec![rclass(&[('0', '9'), ('a', 'z')])]); ++ syntax!(cls17, "[a-z0-9]", vec![rclass(&[('a', 'z'), ('0', '9')])]); ++ syntax!(cls18, "[!0-9a-z]", vec![rclassn(&[('0', '9'), ('a', 'z')])]); ++ syntax!(cls19, "[!a-z0-9]", vec![rclassn(&[('a', 'z'), ('0', '9')])]); ++ syntax!(cls20, "[^a]", vec![classn('a', 'a')]); ++ syntax!(cls21, "[^a-z]", vec![classn('a', 'z')]); ++ ++ syntaxerr!(err_rseq1, "a**", ErrorKind::InvalidRecursive); ++ syntaxerr!(err_rseq2, "**a", ErrorKind::InvalidRecursive); ++ syntaxerr!(err_rseq3, "a**b", ErrorKind::InvalidRecursive); ++ syntaxerr!(err_rseq4, "***", ErrorKind::InvalidRecursive); ++ syntaxerr!(err_rseq5, "/a**", 
ErrorKind::InvalidRecursive); ++ syntaxerr!(err_rseq6, "/**a", ErrorKind::InvalidRecursive); ++ syntaxerr!(err_rseq7, "/a**b", ErrorKind::InvalidRecursive); ++ syntaxerr!(err_unclosed1, "[", ErrorKind::UnclosedClass); ++ syntaxerr!(err_unclosed2, "[]", ErrorKind::UnclosedClass); ++ syntaxerr!(err_unclosed3, "[!", ErrorKind::UnclosedClass); ++ syntaxerr!(err_unclosed4, "[!]", ErrorKind::UnclosedClass); ++ syntaxerr!(err_range1, "[z-a]", ErrorKind::InvalidRange('z', 'a')); ++ syntaxerr!(err_range2, "[z--]", ErrorKind::InvalidRange('z', '-')); ++ ++ const CASEI: Options = Options { ++ casei: Some(true), ++ litsep: None, ++ bsesc: None, ++ }; ++ const SLASHLIT: Options = Options { ++ casei: None, ++ litsep: Some(true), ++ bsesc: None, ++ }; ++ const NOBSESC: Options = Options { ++ casei: None, ++ litsep: None, ++ bsesc: Some(false), ++ }; ++ const BSESC: Options = Options { ++ casei: None, ++ litsep: None, ++ bsesc: Some(true), ++ }; ++ ++ toregex!(re_casei, "a", "(?i)^a$", &CASEI); ++ ++ toregex!(re_slash1, "?", r"^[^/]$", SLASHLIT); ++ toregex!(re_slash2, "*", r"^[^/]*$", SLASHLIT); ++ ++ toregex!(re1, "a", "^a$"); ++ toregex!(re2, "?", "^.$"); ++ toregex!(re3, "*", "^.*$"); ++ toregex!(re4, "a?", "^a.$"); ++ toregex!(re5, "?a", "^.a$"); ++ toregex!(re6, "a*", "^a.*$"); ++ toregex!(re7, "*a", "^.*a$"); ++ toregex!(re8, "[*]", r"^[\*]$"); ++ toregex!(re9, "[+]", r"^[\+]$"); ++ toregex!(re10, "+", r"^\+$"); ++ toregex!(re11, "**", r"^.*$"); ++ toregex!(re12, "☃", r"^\xe2\x98\x83$"); ++ ++ matches!(match1, "a", "a"); ++ matches!(match2, "a*b", "a_b"); ++ matches!(match3, "a*b*c", "abc"); ++ matches!(match4, "a*b*c", "a_b_c"); ++ matches!(match5, "a*b*c", "a___b___c"); ++ matches!(match6, "abc*abc*abc", "abcabcabcabcabcabcabc"); ++ matches!(match7, "a*a*a*a*a*a*a*a*a", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); ++ matches!(match8, "a*b[xyz]c*d", "abxcdbxcddd"); ++ matches!(match9, "*.rs", ".rs"); ++ matches!(match10, "☃", "☃"); ++ ++ matches!(matchrec1, "some/**/needle.txt", "some/needle.txt"); ++ matches!(matchrec2, "some/**/needle.txt", "some/one/needle.txt"); ++ matches!(matchrec3, "some/**/needle.txt", "some/one/two/needle.txt"); ++ matches!(matchrec4, "some/**/needle.txt", "some/other/needle.txt"); ++ matches!(matchrec5, "**", "abcde"); ++ matches!(matchrec6, "**", ""); ++ matches!(matchrec7, "**", ".asdf"); ++ matches!(matchrec8, "**", "/x/.asdf"); ++ matches!(matchrec9, "some/**/**/needle.txt", "some/needle.txt"); ++ matches!(matchrec10, "some/**/**/needle.txt", "some/one/needle.txt"); ++ matches!(matchrec11, "some/**/**/needle.txt", "some/one/two/needle.txt"); ++ matches!(matchrec12, "some/**/**/needle.txt", "some/other/needle.txt"); ++ matches!(matchrec13, "**/test", "one/two/test"); ++ matches!(matchrec14, "**/test", "one/test"); ++ matches!(matchrec15, "**/test", "test"); ++ matches!(matchrec16, "/**/test", "/one/two/test"); ++ matches!(matchrec17, "/**/test", "/one/test"); ++ matches!(matchrec18, "/**/test", "/test"); ++ matches!(matchrec19, "**/.*", ".abc"); ++ matches!(matchrec20, "**/.*", "abc/.abc"); ++ matches!(matchrec21, ".*/**", ".abc"); ++ matches!(matchrec22, ".*/**", ".abc/abc"); ++ matches!(matchrec23, "foo/**", "foo"); ++ matches!(matchrec24, "**/foo/bar", "foo/bar"); ++ matches!(matchrec25, "some/*/needle.txt", "some/one/needle.txt"); ++ ++ matches!(matchrange1, "a[0-9]b", "a0b"); ++ matches!(matchrange2, "a[0-9]b", "a9b"); ++ matches!(matchrange3, "a[!0-9]b", "a_b"); ++ matches!(matchrange4, "[a-z123]", "1"); ++ matches!(matchrange5, "[1a-z23]", "1"); ++ matches!(matchrange6, 
"[123a-z]", "1"); ++ matches!(matchrange7, "[abc-]", "-"); ++ matches!(matchrange8, "[-abc]", "-"); ++ matches!(matchrange9, "[-a-c]", "b"); ++ matches!(matchrange10, "[a-c-]", "b"); ++ matches!(matchrange11, "[-]", "-"); ++ matches!(matchrange12, "a[^0-9]b", "a_b"); ++ ++ matches!(matchpat1, "*hello.txt", "hello.txt"); ++ matches!(matchpat2, "*hello.txt", "gareth_says_hello.txt"); ++ matches!(matchpat3, "*hello.txt", "some/path/to/hello.txt"); ++ matches!(matchpat4, "*hello.txt", "some\\path\\to\\hello.txt"); ++ matches!(matchpat5, "*hello.txt", "/an/absolute/path/to/hello.txt"); ++ matches!(matchpat6, "*some/path/to/hello.txt", "some/path/to/hello.txt"); ++ matches!(matchpat7, "*some/path/to/hello.txt", ++ "a/bigger/some/path/to/hello.txt"); ++ ++ matches!(matchescape, "_[[]_[]]_[?]_[*]_!_", "_[_]_?_*_!_"); ++ ++ matches!(matchcasei1, "aBcDeFg", "aBcDeFg", CASEI); ++ matches!(matchcasei2, "aBcDeFg", "abcdefg", CASEI); ++ matches!(matchcasei3, "aBcDeFg", "ABCDEFG", CASEI); ++ matches!(matchcasei4, "aBcDeFg", "AbCdEfG", CASEI); ++ ++ matches!(matchalt1, "a,b", "a,b"); ++ matches!(matchalt2, ",", ","); ++ matches!(matchalt3, "{a,b}", "a"); ++ matches!(matchalt4, "{a,b}", "b"); ++ matches!(matchalt5, "{**/src/**,foo}", "abc/src/bar"); ++ matches!(matchalt6, "{**/src/**,foo}", "foo"); ++ matches!(matchalt7, "{[}],foo}", "}"); ++ matches!(matchalt8, "{foo}", "foo"); ++ matches!(matchalt9, "{}", ""); ++ matches!(matchalt10, "{,}", ""); ++ matches!(matchalt11, "{*.foo,*.bar,*.wat}", "test.foo"); ++ matches!(matchalt12, "{*.foo,*.bar,*.wat}", "test.bar"); ++ matches!(matchalt13, "{*.foo,*.bar,*.wat}", "test.wat"); ++ ++ matches!(matchslash1, "abc/def", "abc/def", SLASHLIT); ++ #[cfg(unix)] ++ nmatches!(matchslash2, "abc?def", "abc/def", SLASHLIT); ++ #[cfg(not(unix))] ++ nmatches!(matchslash2, "abc?def", "abc\\def", SLASHLIT); ++ nmatches!(matchslash3, "abc*def", "abc/def", SLASHLIT); ++ matches!(matchslash4, "abc[/]def", "abc/def", SLASHLIT); // differs ++ #[cfg(unix)] ++ nmatches!(matchslash5, "abc\\def", "abc/def", SLASHLIT); ++ #[cfg(not(unix))] ++ matches!(matchslash5, "abc\\def", "abc/def", SLASHLIT); ++ ++ matches!(matchbackslash1, "\\[", "[", BSESC); ++ matches!(matchbackslash2, "\\?", "?", BSESC); ++ matches!(matchbackslash3, "\\*", "*", BSESC); ++ matches!(matchbackslash4, "\\[a-z]", "\\a", NOBSESC); ++ matches!(matchbackslash5, "\\?", "\\a", NOBSESC); ++ matches!(matchbackslash6, "\\*", "\\\\", NOBSESC); ++ #[cfg(unix)] ++ matches!(matchbackslash7, "\\a", "a"); ++ #[cfg(not(unix))] ++ matches!(matchbackslash8, "\\a", "/a"); ++ ++ nmatches!(matchnot1, "a*b*c", "abcd"); ++ nmatches!(matchnot2, "abc*abc*abc", "abcabcabcabcabcabcabca"); ++ nmatches!(matchnot3, "some/**/needle.txt", "some/other/notthis.txt"); ++ nmatches!(matchnot4, "some/**/**/needle.txt", "some/other/notthis.txt"); ++ nmatches!(matchnot5, "/**/test", "test"); ++ nmatches!(matchnot6, "/**/test", "/one/notthis"); ++ nmatches!(matchnot7, "/**/test", "/notthis"); ++ nmatches!(matchnot8, "**/.*", "ab.c"); ++ nmatches!(matchnot9, "**/.*", "abc/ab.c"); ++ nmatches!(matchnot10, ".*/**", "a.bc"); ++ nmatches!(matchnot11, ".*/**", "abc/a.bc"); ++ nmatches!(matchnot12, "a[0-9]b", "a_b"); ++ nmatches!(matchnot13, "a[!0-9]b", "a0b"); ++ nmatches!(matchnot14, "a[!0-9]b", "a9b"); ++ nmatches!(matchnot15, "[!-]", "-"); ++ nmatches!(matchnot16, "*hello.txt", "hello.txt-and-then-some"); ++ nmatches!(matchnot17, "*hello.txt", "goodbye.txt"); ++ nmatches!(matchnot18, "*some/path/to/hello.txt", ++ "some/path/to/hello.txt-and-then-some"); ++ 
nmatches!(matchnot19, "*some/path/to/hello.txt", ++ "some/other/path/to/hello.txt"); ++ nmatches!(matchnot20, "a", "foo/a"); ++ nmatches!(matchnot21, "./foo", "foo"); ++ nmatches!(matchnot22, "**/foo", "foofoo"); ++ nmatches!(matchnot23, "**/foo/bar", "foofoo/bar"); ++ nmatches!(matchnot24, "/*.c", "mozilla-sha1/sha1.c"); ++ nmatches!(matchnot25, "*.c", "mozilla-sha1/sha1.c", SLASHLIT); ++ nmatches!(matchnot26, "**/m4/ltoptions.m4", ++ "csharp/src/packages/repositories.config", SLASHLIT); ++ nmatches!(matchnot27, "a[^0-9]b", "a0b"); ++ nmatches!(matchnot28, "a[^0-9]b", "a9b"); ++ nmatches!(matchnot29, "[^-]", "-"); ++ nmatches!(matchnot30, "some/*/needle.txt", "some/needle.txt"); ++ nmatches!( ++ matchrec31, ++ "some/*/needle.txt", "some/one/two/needle.txt", SLASHLIT); ++ nmatches!( ++ matchrec32, ++ "some/*/needle.txt", "some/one/two/three/needle.txt", SLASHLIT); ++ ++ macro_rules! extract { ++ ($which:ident, $name:ident, $pat:expr, $expect:expr) => { ++ extract!($which, $name, $pat, $expect, Options::default()); ++ }; ++ ($which:ident, $name:ident, $pat:expr, $expect:expr, $options:expr) => { ++ #[test] ++ fn $name() { ++ let mut builder = GlobBuilder::new($pat); ++ if let Some(casei) = $options.casei { ++ builder.case_insensitive(casei); ++ } ++ if let Some(litsep) = $options.litsep { ++ builder.literal_separator(litsep); ++ } ++ if let Some(bsesc) = $options.bsesc { ++ builder.backslash_escape(bsesc); ++ } ++ let pat = builder.build().unwrap(); ++ assert_eq!($expect, pat.$which()); ++ } ++ }; ++ } ++ ++ macro_rules! literal { ++ ($($tt:tt)*) => { extract!(literal, $($tt)*); } ++ } ++ ++ macro_rules! basetokens { ++ ($($tt:tt)*) => { extract!(basename_tokens, $($tt)*); } ++ } ++ ++ macro_rules! ext { ++ ($($tt:tt)*) => { extract!(ext, $($tt)*); } ++ } ++ ++ macro_rules! required_ext { ++ ($($tt:tt)*) => { extract!(required_ext, $($tt)*); } ++ } ++ ++ macro_rules! prefix { ++ ($($tt:tt)*) => { extract!(prefix, $($tt)*); } ++ } ++ ++ macro_rules! suffix { ++ ($($tt:tt)*) => { extract!(suffix, $($tt)*); } ++ } ++ ++ macro_rules! 
baseliteral { ++ ($($tt:tt)*) => { extract!(basename_literal, $($tt)*); } ++ } ++ ++ literal!(extract_lit1, "foo", Some(s("foo"))); ++ literal!(extract_lit2, "foo", None, CASEI); ++ literal!(extract_lit3, "/foo", Some(s("/foo"))); ++ literal!(extract_lit4, "/foo/", Some(s("/foo/"))); ++ literal!(extract_lit5, "/foo/bar", Some(s("/foo/bar"))); ++ literal!(extract_lit6, "*.foo", None); ++ literal!(extract_lit7, "foo/bar", Some(s("foo/bar"))); ++ literal!(extract_lit8, "**/foo/bar", None); ++ ++ basetokens!(extract_basetoks1, "**/foo", Some(&*vec![ ++ Literal('f'), Literal('o'), Literal('o'), ++ ])); ++ basetokens!(extract_basetoks2, "**/foo", None, CASEI); ++ basetokens!(extract_basetoks3, "**/foo", Some(&*vec![ ++ Literal('f'), Literal('o'), Literal('o'), ++ ]), SLASHLIT); ++ basetokens!(extract_basetoks4, "*foo", None, SLASHLIT); ++ basetokens!(extract_basetoks5, "*foo", None); ++ basetokens!(extract_basetoks6, "**/fo*o", None); ++ basetokens!(extract_basetoks7, "**/fo*o", Some(&*vec![ ++ Literal('f'), Literal('o'), ZeroOrMore, Literal('o'), ++ ]), SLASHLIT); ++ ++ ext!(extract_ext1, "**/*.rs", Some(s(".rs"))); ++ ext!(extract_ext2, "**/*.rs.bak", None); ++ ext!(extract_ext3, "*.rs", Some(s(".rs"))); ++ ext!(extract_ext4, "a*.rs", None); ++ ext!(extract_ext5, "/*.c", None); ++ ext!(extract_ext6, "*.c", None, SLASHLIT); ++ ext!(extract_ext7, "*.c", Some(s(".c"))); ++ ++ required_ext!(extract_req_ext1, "*.rs", Some(s(".rs"))); ++ required_ext!(extract_req_ext2, "/foo/bar/*.rs", Some(s(".rs"))); ++ required_ext!(extract_req_ext3, "/foo/bar/*.rs", Some(s(".rs"))); ++ required_ext!(extract_req_ext4, "/foo/bar/.rs", Some(s(".rs"))); ++ required_ext!(extract_req_ext5, ".rs", Some(s(".rs"))); ++ required_ext!(extract_req_ext6, "./rs", None); ++ required_ext!(extract_req_ext7, "foo", None); ++ required_ext!(extract_req_ext8, ".foo/", None); ++ required_ext!(extract_req_ext9, "foo/", None); ++ ++ prefix!(extract_prefix1, "/foo", Some(s("/foo"))); ++ prefix!(extract_prefix2, "/foo/*", Some(s("/foo/"))); ++ prefix!(extract_prefix3, "**/foo", None); ++ prefix!(extract_prefix4, "foo/**", None); ++ ++ suffix!(extract_suffix1, "**/foo/bar", Some((s("/foo/bar"), true))); ++ suffix!(extract_suffix2, "*/foo/bar", Some((s("/foo/bar"), false))); ++ suffix!(extract_suffix3, "*/foo/bar", None, SLASHLIT); ++ suffix!(extract_suffix4, "foo/bar", Some((s("foo/bar"), false))); ++ suffix!(extract_suffix5, "*.foo", Some((s(".foo"), false))); ++ suffix!(extract_suffix6, "*.foo", None, SLASHLIT); ++ suffix!(extract_suffix7, "**/*_test", Some((s("_test"), false))); ++ ++ baseliteral!(extract_baselit1, "**/foo", Some(s("foo"))); ++ baseliteral!(extract_baselit2, "foo", None); ++ baseliteral!(extract_baselit3, "*foo", None); ++ baseliteral!(extract_baselit4, "*/foo", None); ++} diff --cc vendor/globset-0.4.1/src/lib.rs index 000000000,000000000..50c92e421 new file mode 100644 --- /dev/null +++ b/vendor/globset-0.4.1/src/lib.rs @@@ -1,0 -1,0 +1,867 @@@ ++/*! ++The globset crate provides cross platform single glob and glob set matching. ++ ++Glob set matching is the process of matching one or more glob patterns against ++a single candidate path simultaneously, and returning all of the globs that ++matched. For example, given this set of globs: ++ ++```ignore ++*.rs ++src/lib.rs ++src/**/foo.rs ++``` ++ ++and a path `src/bar/baz/foo.rs`, then the set would report the first and third ++globs as matching. ++ ++# Example: one glob ++ ++This example shows how to match a single glob against a single file path. 
++ ++``` ++# fn example() -> Result<(), globset::Error> { ++use globset::Glob; ++ ++let glob = Glob::new("*.rs")?.compile_matcher(); ++ ++assert!(glob.is_match("foo.rs")); ++assert!(glob.is_match("foo/bar.rs")); ++assert!(!glob.is_match("Cargo.toml")); ++# Ok(()) } example().unwrap(); ++``` ++ ++# Example: configuring a glob matcher ++ ++This example shows how to use a `GlobBuilder` to configure aspects of match ++semantics. In this example, we prevent wildcards from matching path separators. ++ ++``` ++# fn example() -> Result<(), globset::Error> { ++use globset::GlobBuilder; ++ ++let glob = GlobBuilder::new("*.rs") ++ .literal_separator(true).build()?.compile_matcher(); ++ ++assert!(glob.is_match("foo.rs")); ++assert!(!glob.is_match("foo/bar.rs")); // no longer matches ++assert!(!glob.is_match("Cargo.toml")); ++# Ok(()) } example().unwrap(); ++``` ++ ++# Example: match multiple globs at once ++ ++This example shows how to match multiple glob patterns at once. ++ ++``` ++# fn example() -> Result<(), globset::Error> { ++use globset::{Glob, GlobSetBuilder}; ++ ++let mut builder = GlobSetBuilder::new(); ++// A GlobBuilder can be used to configure each glob's match semantics ++// independently. ++builder.add(Glob::new("*.rs")?); ++builder.add(Glob::new("src/lib.rs")?); ++builder.add(Glob::new("src/**/foo.rs")?); ++let set = builder.build()?; ++ ++assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]); ++# Ok(()) } example().unwrap(); ++``` ++ ++# Syntax ++ ++Standard Unix-style glob syntax is supported: ++ ++* `?` matches any single character. (If the `literal_separator` option is ++ enabled, then `?` can never match a path separator.) ++* `*` matches zero or more characters. (If the `literal_separator` option is ++ enabled, then `*` can never match a path separator.) ++* `**` recursively matches directories but are only legal in three situations. ++ First, if the glob starts with \*\*/, then it matches ++ all directories. For example, \*\*/foo matches `foo` ++ and `bar/foo` but not `foo/bar`. Secondly, if the glob ends with ++ /\*\*, then it matches all sub-entries. For example, ++ foo/\*\* matches `foo/a` and `foo/a/b`, but not `foo`. ++ Thirdly, if the glob contains /\*\*/ anywhere within ++ the pattern, then it matches zero or more directories. Using `**` anywhere ++ else is illegal (N.B. the glob `**` is allowed and means "match everything"). ++* `{a,b}` matches `a` or `b` where `a` and `b` are arbitrary glob patterns. ++ (N.B. Nesting `{...}` is not currently allowed.) ++* `[ab]` matches `a` or `b` where `a` and `b` are characters. Use ++ `[!ab]` to match any character except for `a` and `b`. ++* Metacharacters such as `*` and `?` can be escaped with character class ++ notation. e.g., `[*]` matches `*`. ++* When backslash escapes are enabled, a backslash (`\`) will escape all meta ++ characters in a glob. If it precedes a non-meta character, then the slash is ++ ignored. A `\\` will match a literal `\\`. Note that this mode is only ++ enabled on Unix platforms by default, but can be enabled on any platform ++ via the `backslash_escape` setting on `Glob`. ++ ++A `GlobBuilder` can be used to prevent wildcards from matching path separators, ++or to enable case insensitive matching. 
++*/
++
++#![deny(missing_docs)]
++
++extern crate aho_corasick;
++extern crate fnv;
++#[macro_use]
++extern crate log;
++extern crate memchr;
++extern crate regex;
++
++use std::borrow::Cow;
++use std::collections::{BTreeMap, HashMap};
++use std::error::Error as StdError;
++use std::ffi::OsStr;
++use std::fmt;
++use std::hash;
++use std::path::Path;
++use std::str;
++
++use aho_corasick::{Automaton, AcAutomaton, FullAcAutomaton};
++use regex::bytes::{Regex, RegexBuilder, RegexSet};
++
++use pathutil::{
++    file_name, file_name_ext, normalize_path, os_str_bytes, path_bytes,
++};
++use glob::MatchStrategy;
++pub use glob::{Glob, GlobBuilder, GlobMatcher};
++
++mod glob;
++mod pathutil;
++
++/// Represents an error that can occur when parsing a glob pattern.
++#[derive(Clone, Debug, Eq, PartialEq)]
++pub struct Error {
++    /// The original glob provided by the caller.
++    glob: Option<String>,
++    /// The kind of error.
++    kind: ErrorKind,
++}
++
++/// The kind of error that can occur when parsing a glob pattern.
++#[derive(Clone, Debug, Eq, PartialEq)]
++pub enum ErrorKind {
++    /// Occurs when a use of `**` is invalid. Namely, `**` can only appear
++    /// adjacent to a path separator, or the beginning/end of a glob.
++    InvalidRecursive,
++    /// Occurs when a character class (e.g., `[abc]`) is not closed.
++    UnclosedClass,
++    /// Occurs when a range in a character class (e.g., `[a-z]`) is invalid.
++    /// For example, if the range starts with a lexicographically larger
++    /// character than it ends with.
++    InvalidRange(char, char),
++    /// Occurs when a `}` is found without a matching `{`.
++    UnopenedAlternates,
++    /// Occurs when a `{` is found without a matching `}`.
++    UnclosedAlternates,
++    /// Occurs when an alternating group is nested inside another alternating
++    /// group, e.g., `{{a,b},{c,d}}`.
++    NestedAlternates,
++    /// Occurs when an unescaped '\' is found at the end of a glob.
++    DanglingEscape,
++    /// An error associated with parsing or compiling a regex.
++    Regex(String),
++    /// Hints that destructuring should not be exhaustive.
++    ///
++    /// This enum may grow additional variants, so this makes sure clients
++    /// don't count on exhaustive matching. (Otherwise, adding a new variant
++    /// could break existing code.)
++    #[doc(hidden)]
++    __Nonexhaustive,
++}
++
++impl StdError for Error {
++    fn description(&self) -> &str {
++        self.kind.description()
++    }
++}
++
++impl Error {
++    /// Return the glob that caused this error, if one exists.
++    pub fn glob(&self) -> Option<&str> {
++        self.glob.as_ref().map(|s| &**s)
++    }
++
++    /// Return the kind of this error.
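++    ///
++    /// A small sketch of inspecting the kind; the assertion mirrors the
++    /// `err_rseq1` case from the glob parser tests:
++    ///
++    /// ```
++    /// use globset::{ErrorKind, Glob};
++    ///
++    /// let err = Glob::new("a**").unwrap_err();
++    /// assert_eq!(err.kind(), &ErrorKind::InvalidRecursive);
++    /// ```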
++    pub fn kind(&self) -> &ErrorKind {
++        &self.kind
++    }
++}
++
++impl ErrorKind {
++    fn description(&self) -> &str {
++        match *self {
++            ErrorKind::InvalidRecursive => {
++                "invalid use of **; must be one path component"
++            }
++            ErrorKind::UnclosedClass => {
++                "unclosed character class; missing ']'"
++            }
++            ErrorKind::InvalidRange(_, _) => {
++                "invalid character range"
++            }
++            ErrorKind::UnopenedAlternates => {
++                "unopened alternate group; missing '{' \
++                (maybe escape '}' with '[}]'?)"
++            }
++            ErrorKind::UnclosedAlternates => {
++                "unclosed alternate group; missing '}' \
++                (maybe escape '{' with '[{]'?)"
++            }
++            ErrorKind::NestedAlternates => {
++                "nested alternate groups are not allowed"
++            }
++            ErrorKind::DanglingEscape => {
++                "dangling '\\'"
++            }
++            ErrorKind::Regex(ref err) => err,
++            ErrorKind::__Nonexhaustive => unreachable!(),
++        }
++    }
++}
++
++impl fmt::Display for Error {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        match self.glob {
++            None => self.kind.fmt(f),
++            Some(ref glob) => {
++                write!(f, "error parsing glob '{}': {}", glob, self.kind)
++            }
++        }
++    }
++}
++
++impl fmt::Display for ErrorKind {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        match *self {
++            ErrorKind::InvalidRecursive
++            | ErrorKind::UnclosedClass
++            | ErrorKind::UnopenedAlternates
++            | ErrorKind::UnclosedAlternates
++            | ErrorKind::NestedAlternates
++            | ErrorKind::DanglingEscape
++            | ErrorKind::Regex(_) => {
++                write!(f, "{}", self.description())
++            }
++            ErrorKind::InvalidRange(s, e) => {
++                write!(f, "invalid range; '{}' > '{}'", s, e)
++            }
++            ErrorKind::__Nonexhaustive => unreachable!(),
++        }
++    }
++}
++
++fn new_regex(pat: &str) -> Result<Regex, Error> {
++    RegexBuilder::new(pat)
++        .dot_matches_new_line(true)
++        .size_limit(10 * (1 << 20))
++        .dfa_size_limit(10 * (1 << 20))
++        .build()
++        .map_err(|err| {
++            Error {
++                glob: Some(pat.to_string()),
++                kind: ErrorKind::Regex(err.to_string()),
++            }
++        })
++}
++
++fn new_regex_set<I, S>(pats: I) -> Result<RegexSet, Error>
++    where S: AsRef<str>, I: IntoIterator<Item=S> {
++    RegexSet::new(pats).map_err(|err| {
++        Error {
++            glob: None,
++            kind: ErrorKind::Regex(err.to_string()),
++        }
++    })
++}
++
++type Fnv = hash::BuildHasherDefault<fnv::FnvHasher>;
++
++/// GlobSet represents a group of globs that can be matched together in a
++/// single pass.
++#[derive(Clone, Debug)]
++pub struct GlobSet {
++    len: usize,
++    strats: Vec<GlobSetMatchStrategy>,
++}
++
++impl GlobSet {
++    /// Create an empty `GlobSet`. An empty set matches nothing.
++    pub fn empty() -> GlobSet {
++        GlobSet {
++            len: 0,
++            strats: vec![],
++        }
++    }
++
++    /// Returns true if this set is empty, and therefore matches nothing.
++    pub fn is_empty(&self) -> bool {
++        self.len == 0
++    }
++
++    /// Returns the number of globs in this set.
++    pub fn len(&self) -> usize {
++        self.len
++    }
++
++    /// Returns true if any glob in this set matches the path given.
++    pub fn is_match<P: AsRef<Path>>(&self, path: P) -> bool {
++        self.is_match_candidate(&Candidate::new(path.as_ref()))
++    }
++
++    /// Returns true if any glob in this set matches the path given.
++    ///
++    /// This takes a Candidate as input, which can be used to amortize the
++    /// cost of preparing a path for matching.
++    pub fn is_match_candidate(&self, path: &Candidate) -> bool {
++        if self.is_empty() {
++            return false;
++        }
++        for strat in &self.strats {
++            if strat.is_match(path) {
++                return true;
++            }
++        }
++        false
++    }
++
++    /// Returns the sequence number of every glob pattern that matches the
++    /// given path.
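++    ///
++    /// The sequence numbers correspond to the order in which the globs were
++    /// added to the `GlobSetBuilder`. A short sketch:
++    ///
++    /// ```
++    /// use globset::{Glob, GlobSetBuilder};
++    ///
++    /// let mut builder = GlobSetBuilder::new();
++    /// builder.add(Glob::new("*.rs").unwrap());       // sequence number 0
++    /// builder.add(Glob::new("src/lib.rs").unwrap()); // sequence number 1
++    /// let set = builder.build().unwrap();
++    ///
++    /// assert_eq!(set.matches("src/lib.rs"), vec![0, 1]);
++    /// ```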
++    pub fn matches<P: AsRef<Path>>(&self, path: P) -> Vec<usize> {
++        self.matches_candidate(&Candidate::new(path.as_ref()))
++    }
++
++    /// Returns the sequence number of every glob pattern that matches the
++    /// given path.
++    ///
++    /// This takes a Candidate as input, which can be used to amortize the
++    /// cost of preparing a path for matching.
++    pub fn matches_candidate(&self, path: &Candidate) -> Vec<usize> {
++        let mut into = vec![];
++        if self.is_empty() {
++            return into;
++        }
++        self.matches_candidate_into(path, &mut into);
++        into
++    }
++
++    /// Adds the sequence number of every glob pattern that matches the given
++    /// path to the vec given.
++    ///
++    /// `into` is cleared before matching begins, and contains the set of
++    /// sequence numbers (in ascending order) after matching ends. If no globs
++    /// were matched, then `into` will be empty.
++    pub fn matches_into<P: AsRef<Path>>(
++        &self,
++        path: P,
++        into: &mut Vec<usize>,
++    ) {
++        self.matches_candidate_into(&Candidate::new(path.as_ref()), into);
++    }
++
++    /// Adds the sequence number of every glob pattern that matches the given
++    /// path to the vec given.
++    ///
++    /// `into` is cleared before matching begins, and contains the set of
++    /// sequence numbers (in ascending order) after matching ends. If no globs
++    /// were matched, then `into` will be empty.
++    ///
++    /// This takes a Candidate as input, which can be used to amortize the
++    /// cost of preparing a path for matching.
++    pub fn matches_candidate_into(
++        &self,
++        path: &Candidate,
++        into: &mut Vec<usize>,
++    ) {
++        into.clear();
++        if self.is_empty() {
++            return;
++        }
++        for strat in &self.strats {
++            strat.matches_into(path, into);
++        }
++        into.sort();
++        into.dedup();
++    }
++
++    fn new(pats: &[Glob]) -> Result<GlobSet, Error> {
++        if pats.is_empty() {
++            return Ok(GlobSet { len: 0, strats: vec![] });
++        }
++        let mut lits = LiteralStrategy::new();
++        let mut base_lits = BasenameLiteralStrategy::new();
++        let mut exts = ExtensionStrategy::new();
++        let mut prefixes = MultiStrategyBuilder::new();
++        let mut suffixes = MultiStrategyBuilder::new();
++        let mut required_exts = RequiredExtensionStrategyBuilder::new();
++        let mut regexes = MultiStrategyBuilder::new();
++        for (i, p) in pats.iter().enumerate() {
++            match MatchStrategy::new(p) {
++                MatchStrategy::Literal(lit) => {
++                    lits.add(i, lit);
++                }
++                MatchStrategy::BasenameLiteral(lit) => {
++                    base_lits.add(i, lit);
++                }
++                MatchStrategy::Extension(ext) => {
++                    exts.add(i, ext);
++                }
++                MatchStrategy::Prefix(prefix) => {
++                    prefixes.add(i, prefix);
++                }
++                MatchStrategy::Suffix { suffix, component } => {
++                    if component {
++                        lits.add(i, suffix[1..].to_string());
++                    }
++                    suffixes.add(i, suffix);
++                }
++                MatchStrategy::RequiredExtension(ext) => {
++                    required_exts.add(i, ext, p.regex().to_owned());
++                }
++                MatchStrategy::Regex => {
++                    debug!("glob converted to regex: {:?}", p);
++                    regexes.add(i, p.regex().to_owned());
++                }
++            }
++        }
++        debug!("built glob set; {} literals, {} basenames, {} extensions, \
++                {} prefixes, {} suffixes, {} required extensions, {} regexes",
++               lits.0.len(), base_lits.0.len(), exts.0.len(),
++               prefixes.literals.len(), suffixes.literals.len(),
++               required_exts.0.len(), regexes.literals.len());
++        Ok(GlobSet {
++            len: pats.len(),
++            strats: vec![
++                GlobSetMatchStrategy::Extension(exts),
++                GlobSetMatchStrategy::BasenameLiteral(base_lits),
++                GlobSetMatchStrategy::Literal(lits),
++                GlobSetMatchStrategy::Suffix(suffixes.suffix()),
++                GlobSetMatchStrategy::Prefix(prefixes.prefix()),
++                GlobSetMatchStrategy::RequiredExtension(
++                    required_exts.build()?),
++                GlobSetMatchStrategy::Regex(regexes.regex_set()?),
++            ],
++        })
++    }
++}
++
++/// GlobSetBuilder builds a group of patterns that can be used to
++/// simultaneously match a file path.
++#[derive(Clone, Debug)]
++pub struct GlobSetBuilder {
++    pats: Vec<Glob>,
++}
++
++impl GlobSetBuilder {
++    /// Create a new GlobSetBuilder. A GlobSetBuilder can be used to add new
++    /// patterns. Once all patterns have been added, `build` should be called
++    /// to produce a `GlobSet`, which can then be used for matching.
++    pub fn new() -> GlobSetBuilder {
++        GlobSetBuilder { pats: vec![] }
++    }
++
++    /// Builds a new matcher from all of the glob patterns added so far.
++    ///
++    /// Once a matcher is built, no new patterns can be added to it.
++    pub fn build(&self) -> Result<GlobSet, Error> {
++        GlobSet::new(&self.pats)
++    }
++
++    /// Add a new pattern to this set.
++    #[allow(dead_code)]
++    pub fn add(&mut self, pat: Glob) -> &mut GlobSetBuilder {
++        self.pats.push(pat);
++        self
++    }
++}
++
++/// A candidate path for matching.
++///
++/// All glob matching in this crate operates on `Candidate` values.
++/// Constructing candidates has a very small cost associated with it, so
++/// callers may find it beneficial to amortize that cost when matching a single
++/// path against multiple globs or sets of globs.
++#[derive(Clone, Debug)]
++pub struct Candidate<'a> {
++    path: Cow<'a, [u8]>,
++    basename: Cow<'a, [u8]>,
++    ext: Cow<'a, [u8]>,
++}
++
++impl<'a> Candidate<'a> {
++    /// Create a new candidate for matching from the given path.
++    pub fn new<P: AsRef<Path> + ?Sized>(path: &'a P) -> Candidate<'a> {
++        let path = path.as_ref();
++        let basename = file_name(path).unwrap_or(OsStr::new(""));
++        Candidate {
++            path: normalize_path(path_bytes(path)),
++            basename: os_str_bytes(basename),
++            ext: file_name_ext(basename).unwrap_or(Cow::Borrowed(b"")),
++        }
++    }
++
++    fn path_prefix(&self, max: usize) -> &[u8] {
++        if self.path.len() <= max {
++            &*self.path
++        } else {
++            &self.path[..max]
++        }
++    }
++
++    fn path_suffix(&self, max: usize) -> &[u8] {
++        if self.path.len() <= max {
++            &*self.path
++        } else {
++            &self.path[self.path.len() - max..]
++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++enum GlobSetMatchStrategy { ++ Literal(LiteralStrategy), ++ BasenameLiteral(BasenameLiteralStrategy), ++ Extension(ExtensionStrategy), ++ Prefix(PrefixStrategy), ++ Suffix(SuffixStrategy), ++ RequiredExtension(RequiredExtensionStrategy), ++ Regex(RegexSetStrategy), ++} ++ ++impl GlobSetMatchStrategy { ++ fn is_match(&self, candidate: &Candidate) -> bool { ++ use self::GlobSetMatchStrategy::*; ++ match *self { ++ Literal(ref s) => s.is_match(candidate), ++ BasenameLiteral(ref s) => s.is_match(candidate), ++ Extension(ref s) => s.is_match(candidate), ++ Prefix(ref s) => s.is_match(candidate), ++ Suffix(ref s) => s.is_match(candidate), ++ RequiredExtension(ref s) => s.is_match(candidate), ++ Regex(ref s) => s.is_match(candidate), ++ } ++ } ++ ++ fn matches_into(&self, candidate: &Candidate, matches: &mut Vec) { ++ use self::GlobSetMatchStrategy::*; ++ match *self { ++ Literal(ref s) => s.matches_into(candidate, matches), ++ BasenameLiteral(ref s) => s.matches_into(candidate, matches), ++ Extension(ref s) => s.matches_into(candidate, matches), ++ Prefix(ref s) => s.matches_into(candidate, matches), ++ Suffix(ref s) => s.matches_into(candidate, matches), ++ RequiredExtension(ref s) => s.matches_into(candidate, matches), ++ Regex(ref s) => s.matches_into(candidate, matches), ++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct LiteralStrategy(BTreeMap, Vec>); ++ ++impl LiteralStrategy { ++ fn new() -> LiteralStrategy { ++ LiteralStrategy(BTreeMap::new()) ++ } ++ ++ fn add(&mut self, global_index: usize, lit: String) { ++ self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index); ++ } ++ ++ fn is_match(&self, candidate: &Candidate) -> bool { ++ self.0.contains_key(&*candidate.path) ++ } ++ ++ #[inline(never)] ++ fn matches_into(&self, candidate: &Candidate, matches: &mut Vec) { ++ if let Some(hits) = self.0.get(&*candidate.path) { ++ matches.extend(hits); ++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct BasenameLiteralStrategy(BTreeMap, Vec>); ++ ++impl BasenameLiteralStrategy { ++ fn new() -> BasenameLiteralStrategy { ++ BasenameLiteralStrategy(BTreeMap::new()) ++ } ++ ++ fn add(&mut self, global_index: usize, lit: String) { ++ self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index); ++ } ++ ++ fn is_match(&self, candidate: &Candidate) -> bool { ++ if candidate.basename.is_empty() { ++ return false; ++ } ++ self.0.contains_key(&*candidate.basename) ++ } ++ ++ #[inline(never)] ++ fn matches_into(&self, candidate: &Candidate, matches: &mut Vec) { ++ if candidate.basename.is_empty() { ++ return; ++ } ++ if let Some(hits) = self.0.get(&*candidate.basename) { ++ matches.extend(hits); ++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct ExtensionStrategy(HashMap, Vec, Fnv>); ++ ++impl ExtensionStrategy { ++ fn new() -> ExtensionStrategy { ++ ExtensionStrategy(HashMap::with_hasher(Fnv::default())) ++ } ++ ++ fn add(&mut self, global_index: usize, ext: String) { ++ self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index); ++ } ++ ++ fn is_match(&self, candidate: &Candidate) -> bool { ++ if candidate.ext.is_empty() { ++ return false; ++ } ++ self.0.contains_key(&*candidate.ext) ++ } ++ ++ #[inline(never)] ++ fn matches_into(&self, candidate: &Candidate, matches: &mut Vec) { ++ if candidate.ext.is_empty() { ++ return; ++ } ++ if let Some(hits) = self.0.get(&*candidate.ext) { ++ matches.extend(hits); ++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct PrefixStrategy { ++ matcher: FullAcAutomaton>, ++ map: Vec, ++ longest: usize, 
++} ++ ++impl PrefixStrategy { ++ fn is_match(&self, candidate: &Candidate) -> bool { ++ let path = candidate.path_prefix(self.longest); ++ for m in self.matcher.find_overlapping(path) { ++ if m.start == 0 { ++ return true; ++ } ++ } ++ false ++ } ++ ++ fn matches_into(&self, candidate: &Candidate, matches: &mut Vec) { ++ let path = candidate.path_prefix(self.longest); ++ for m in self.matcher.find_overlapping(path) { ++ if m.start == 0 { ++ matches.push(self.map[m.pati]); ++ } ++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct SuffixStrategy { ++ matcher: FullAcAutomaton>, ++ map: Vec, ++ longest: usize, ++} ++ ++impl SuffixStrategy { ++ fn is_match(&self, candidate: &Candidate) -> bool { ++ let path = candidate.path_suffix(self.longest); ++ for m in self.matcher.find_overlapping(path) { ++ if m.end == path.len() { ++ return true; ++ } ++ } ++ false ++ } ++ ++ fn matches_into(&self, candidate: &Candidate, matches: &mut Vec) { ++ let path = candidate.path_suffix(self.longest); ++ for m in self.matcher.find_overlapping(path) { ++ if m.end == path.len() { ++ matches.push(self.map[m.pati]); ++ } ++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct RequiredExtensionStrategy(HashMap, Vec<(usize, Regex)>, Fnv>); ++ ++impl RequiredExtensionStrategy { ++ fn is_match(&self, candidate: &Candidate) -> bool { ++ if candidate.ext.is_empty() { ++ return false; ++ } ++ match self.0.get(&*candidate.ext) { ++ None => false, ++ Some(regexes) => { ++ for &(_, ref re) in regexes { ++ if re.is_match(&*candidate.path) { ++ return true; ++ } ++ } ++ false ++ } ++ } ++ } ++ ++ #[inline(never)] ++ fn matches_into(&self, candidate: &Candidate, matches: &mut Vec) { ++ if candidate.ext.is_empty() { ++ return; ++ } ++ if let Some(regexes) = self.0.get(&*candidate.ext) { ++ for &(global_index, ref re) in regexes { ++ if re.is_match(&*candidate.path) { ++ matches.push(global_index); ++ } ++ } ++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct RegexSetStrategy { ++ matcher: RegexSet, ++ map: Vec, ++} ++ ++impl RegexSetStrategy { ++ fn is_match(&self, candidate: &Candidate) -> bool { ++ self.matcher.is_match(&*candidate.path) ++ } ++ ++ fn matches_into(&self, candidate: &Candidate, matches: &mut Vec) { ++ for i in self.matcher.matches(&*candidate.path) { ++ matches.push(self.map[i]); ++ } ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct MultiStrategyBuilder { ++ literals: Vec, ++ map: Vec, ++ longest: usize, ++} ++ ++impl MultiStrategyBuilder { ++ fn new() -> MultiStrategyBuilder { ++ MultiStrategyBuilder { ++ literals: vec![], ++ map: vec![], ++ longest: 0, ++ } ++ } ++ ++ fn add(&mut self, global_index: usize, literal: String) { ++ if literal.len() > self.longest { ++ self.longest = literal.len(); ++ } ++ self.map.push(global_index); ++ self.literals.push(literal); ++ } ++ ++ fn prefix(self) -> PrefixStrategy { ++ let it = self.literals.into_iter().map(|s| s.into_bytes()); ++ PrefixStrategy { ++ matcher: AcAutomaton::new(it).into_full(), ++ map: self.map, ++ longest: self.longest, ++ } ++ } ++ ++ fn suffix(self) -> SuffixStrategy { ++ let it = self.literals.into_iter().map(|s| s.into_bytes()); ++ SuffixStrategy { ++ matcher: AcAutomaton::new(it).into_full(), ++ map: self.map, ++ longest: self.longest, ++ } ++ } ++ ++ fn regex_set(self) -> Result { ++ Ok(RegexSetStrategy { ++ matcher: new_regex_set(self.literals)?, ++ map: self.map, ++ }) ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct RequiredExtensionStrategyBuilder( ++ HashMap, Vec<(usize, String)>>, ++); ++ ++impl RequiredExtensionStrategyBuilder { ++ fn new() -> 
RequiredExtensionStrategyBuilder { ++ RequiredExtensionStrategyBuilder(HashMap::new()) ++ } ++ ++ fn add(&mut self, global_index: usize, ext: String, regex: String) { ++ self.0 ++ .entry(ext.into_bytes()) ++ .or_insert(vec![]) ++ .push((global_index, regex)); ++ } ++ ++ fn build(self) -> Result { ++ let mut exts = HashMap::with_hasher(Fnv::default()); ++ for (ext, regexes) in self.0.into_iter() { ++ exts.insert(ext.clone(), vec![]); ++ for (global_index, regex) in regexes { ++ let compiled = new_regex(®ex)?; ++ exts.get_mut(&ext).unwrap().push((global_index, compiled)); ++ } ++ } ++ Ok(RequiredExtensionStrategy(exts)) ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::GlobSetBuilder; ++ use glob::Glob; ++ ++ #[test] ++ fn set_works() { ++ let mut builder = GlobSetBuilder::new(); ++ builder.add(Glob::new("src/**/*.rs").unwrap()); ++ builder.add(Glob::new("*.c").unwrap()); ++ builder.add(Glob::new("src/lib.rs").unwrap()); ++ let set = builder.build().unwrap(); ++ ++ assert!(set.is_match("foo.c")); ++ assert!(set.is_match("src/foo.c")); ++ assert!(!set.is_match("foo.rs")); ++ assert!(!set.is_match("tests/foo.rs")); ++ assert!(set.is_match("src/foo.rs")); ++ assert!(set.is_match("src/grep/src/main.rs")); ++ ++ let matches = set.matches("src/lib.rs"); ++ assert_eq!(2, matches.len()); ++ assert_eq!(0, matches[0]); ++ assert_eq!(2, matches[1]); ++ } ++ ++ #[test] ++ fn empty_set_works() { ++ let set = GlobSetBuilder::new().build().unwrap(); ++ assert!(!set.is_match("")); ++ assert!(!set.is_match("a")); ++ } ++} diff --cc vendor/globset-0.4.1/src/pathutil.rs index 000000000,000000000..4b808e86d new file mode 100644 --- /dev/null +++ b/vendor/globset-0.4.1/src/pathutil.rs @@@ -1,0 -1,0 +1,172 @@@ ++use std::borrow::Cow; ++use std::ffi::OsStr; ++use std::path::Path; ++ ++/// The final component of the path, if it is a normal file. ++/// ++/// If the path terminates in ., .., or consists solely of a root of prefix, ++/// file_name will return None. ++#[cfg(unix)] ++pub fn file_name<'a, P: AsRef + ?Sized>( ++ path: &'a P, ++) -> Option<&'a OsStr> { ++ use std::os::unix::ffi::OsStrExt; ++ use memchr::memrchr; ++ ++ let path = path.as_ref().as_os_str().as_bytes(); ++ if path.is_empty() { ++ return None; ++ } else if path.len() == 1 && path[0] == b'.' { ++ return None; ++ } else if path.last() == Some(&b'.') { ++ return None; ++ } else if path.len() >= 2 && &path[path.len() - 2..] == &b".."[..] { ++ return None; ++ } ++ let last_slash = memrchr(b'/', path).map(|i| i + 1).unwrap_or(0); ++ Some(OsStr::from_bytes(&path[last_slash..])) ++} ++ ++/// The final component of the path, if it is a normal file. ++/// ++/// If the path terminates in ., .., or consists solely of a root of prefix, ++/// file_name will return None. ++#[cfg(not(unix))] ++pub fn file_name<'a, P: AsRef + ?Sized>( ++ path: &'a P, ++) -> Option<&'a OsStr> { ++ path.as_ref().file_name() ++} ++ ++/// Return a file extension given a path's file name. ++/// ++/// Note that this does NOT match the semantics of std::path::Path::extension. ++/// Namely, the extension includes the `.` and matching is otherwise more ++/// liberal. Specifically, the extenion is: ++/// ++/// * None, if the file name given is empty; ++/// * None, if there is no embedded `.`; ++/// * Otherwise, the portion of the file name starting with the final `.`. ++/// ++/// e.g., A file name of `.rs` has an extension `.rs`. ++/// ++/// N.B. This is done to make certain glob match optimizations easier. 
Namely, ++/// a pattern like `*.rs` is obviously trying to match files with a `rs` ++/// extension, but it also matches files like `.rs`, which doesn't have an ++/// extension according to std::path::Path::extension. ++pub fn file_name_ext(name: &OsStr) -> Option> { ++ if name.is_empty() { ++ return None; ++ } ++ let name = os_str_bytes(name); ++ let last_dot_at = { ++ let result = name ++ .iter().enumerate().rev() ++ .find(|&(_, &b)| b == b'.') ++ .map(|(i, _)| i); ++ match result { ++ None => return None, ++ Some(i) => i, ++ } ++ }; ++ Some(match name { ++ Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]), ++ Cow::Owned(mut name) => { ++ name.drain(..last_dot_at); ++ Cow::Owned(name) ++ } ++ }) ++} ++ ++/// Return raw bytes of a path, transcoded to UTF-8 if necessary. ++pub fn path_bytes(path: &Path) -> Cow<[u8]> { ++ os_str_bytes(path.as_os_str()) ++} ++ ++/// Return the raw bytes of the given OS string, possibly transcoded to UTF-8. ++#[cfg(unix)] ++pub fn os_str_bytes(s: &OsStr) -> Cow<[u8]> { ++ use std::os::unix::ffi::OsStrExt; ++ Cow::Borrowed(s.as_bytes()) ++} ++ ++/// Return the raw bytes of the given OS string, possibly transcoded to UTF-8. ++#[cfg(not(unix))] ++pub fn os_str_bytes(s: &OsStr) -> Cow<[u8]> { ++ // TODO(burntsushi): On Windows, OS strings are WTF-8, which is a superset ++ // of UTF-8, so even if we could get at the raw bytes, they wouldn't ++ // be useful. We *must* convert to UTF-8 before doing path matching. ++ // Unfortunate, but necessary. ++ match s.to_string_lossy() { ++ Cow::Owned(s) => Cow::Owned(s.into_bytes()), ++ Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()), ++ } ++} ++ ++/// Normalizes a path to use `/` as a separator everywhere, even on platforms ++/// that recognize other characters as separators. ++#[cfg(unix)] ++pub fn normalize_path(path: Cow<[u8]>) -> Cow<[u8]> { ++ // UNIX only uses /, so we're good. ++ path ++} ++ ++/// Normalizes a path to use `/` as a separator everywhere, even on platforms ++/// that recognize other characters as separators. ++#[cfg(not(unix))] ++pub fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> { ++ use std::path::is_separator; ++ ++ for i in 0..path.len() { ++ if path[i] == b'/' || !is_separator(path[i] as char) { ++ continue; ++ } ++ path.to_mut()[i] = b'/'; ++ } ++ path ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::borrow::Cow; ++ use std::ffi::OsStr; ++ ++ use super::{file_name_ext, normalize_path}; ++ ++ macro_rules! ext { ++ ($name:ident, $file_name:expr, $ext:expr) => { ++ #[test] ++ fn $name() { ++ let got = file_name_ext(OsStr::new($file_name)); ++ assert_eq!($ext.map(|s| Cow::Borrowed(s.as_bytes())), got); ++ } ++ }; ++ } ++ ++ ext!(ext1, "foo.rs", Some(".rs")); ++ ext!(ext2, ".rs", Some(".rs")); ++ ext!(ext3, "..rs", Some(".rs")); ++ ext!(ext4, "", None::<&str>); ++ ext!(ext5, "foo", None::<&str>); ++ ++ macro_rules! 
normalize { ++ ($name:ident, $path:expr, $expected:expr) => { ++ #[test] ++ fn $name() { ++ let got = normalize_path(Cow::Owned($path.to_vec())); ++ assert_eq!($expected.to_vec(), got.into_owned()); ++ } ++ }; ++ } ++ ++ normalize!(normal1, b"foo", b"foo"); ++ normalize!(normal2, b"foo/bar", b"foo/bar"); ++ #[cfg(unix)] ++ normalize!(normal3, b"foo\\bar", b"foo\\bar"); ++ #[cfg(not(unix))] ++ normalize!(normal3, b"foo\\bar", b"foo/bar"); ++ #[cfg(unix)] ++ normalize!(normal4, b"foo\\bar/baz", b"foo\\bar/baz"); ++ #[cfg(not(unix))] ++ normalize!(normal4, b"foo\\bar/baz", b"foo/bar/baz"); ++} diff --cc vendor/ignore-0.4.3/.cargo-checksum.json index 000000000,000000000..c1fd90c03 new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"3e9faa7c84064f07b40da27044af629f578bc7994b650d3e458d0c29183c1d91"} diff --cc vendor/ignore-0.4.3/COPYING index 000000000,000000000..bb9c20a09 new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/COPYING @@@ -1,0 -1,0 +1,3 @@@ ++This project is dual-licensed under the Unlicense and MIT licenses. ++ ++You may use this code under the terms of either license. diff --cc vendor/ignore-0.4.3/Cargo.toml index 000000000,000000000..bcd6ec212 new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/Cargo.toml @@@ -1,0 -1,0 +1,61 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "ignore" ++version = "0.4.3" ++authors = ["Andrew Gallant "] ++description = "A fast library for efficiently matching ignore files such as `.gitignore`\nagainst file paths.\n" ++homepage = "https://github.com/BurntSushi/ripgrep/tree/master/ignore" ++documentation = "https://docs.rs/ignore" ++readme = "README.md" ++keywords = ["glob", "ignore", "gitignore", "pattern", "file"] ++license = "Unlicense/MIT" ++repository = "https://github.com/BurntSushi/ripgrep/tree/master/ignore" ++ ++[lib] ++name = "ignore" ++bench = false ++[dependencies.crossbeam] ++version = "0.3" ++ ++[dependencies.globset] ++version = "0.4.0" ++ ++[dependencies.lazy_static] ++version = "1" ++ ++[dependencies.log] ++version = "0.4" ++ ++[dependencies.memchr] ++version = "2" ++ ++[dependencies.regex] ++version = "1" ++ ++[dependencies.same-file] ++version = "1" ++ ++[dependencies.thread_local] ++version = "0.3.2" ++ ++[dependencies.walkdir] ++version = "2" ++[dev-dependencies.tempdir] ++version = "0.3.5" ++ ++[features] ++simd-accel = ["globset/simd-accel"] ++[target."cfg(windows)".dependencies.winapi] ++version = "0.3" ++features = ["std", "winnt"] diff --cc vendor/ignore-0.4.3/LICENSE-MIT index 000000000,000000000..3b0a5dc09 new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/LICENSE-MIT @@@ -1,0 -1,0 +1,21 @@@ ++The MIT License (MIT) ++ ++Copyright (c) 2015 Andrew Gallant ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. diff --cc vendor/ignore-0.4.3/README.md index 000000000,000000000..f527da46a new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/README.md @@@ -1,0 -1,0 +1,66 @@@ ++ignore ++====== ++The ignore crate provides a fast recursive directory iterator that respects ++various filters such as globs, file types and `.gitignore` files. This crate ++also provides lower level direct access to gitignore and file type matchers. ++ ++[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.png)](https://travis-ci.org/BurntSushi/ripgrep) ++[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep) ++[![](https://img.shields.io/crates/v/ignore.svg)](https://crates.io/crates/ignore) ++ ++Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org). ++ ++### Documentation ++ ++[https://docs.rs/ignore](https://docs.rs/ignore) ++ ++### Usage ++ ++Add this to your `Cargo.toml`: ++ ++```toml ++[dependencies] ++ignore = "0.4" ++``` ++ ++and this to your crate root: ++ ++```rust ++extern crate ignore; ++``` ++ ++### Example ++ ++This example shows the most basic usage of this crate. This code will ++recursively traverse the current directory while automatically filtering out ++files and directories according to ignore globs found in files like ++`.ignore` and `.gitignore`: ++ ++ ++```rust,no_run ++use ignore::Walk; ++ ++for result in Walk::new("./") { ++ // Each item yielded by the iterator is either a directory entry or an ++ // error, so either print the path or the error. ++ match result { ++ Ok(entry) => println!("{}", entry.path().display()), ++ Err(err) => println!("ERROR: {}", err), ++ } ++} ++``` ++ ++### Example: advanced ++ ++By default, the recursive directory iterator will ignore hidden files and ++directories. This can be disabled by building the iterator with `WalkBuilder`: ++ ++```rust,no_run ++use ignore::WalkBuilder; ++ ++for result in WalkBuilder::new("./").hidden(false).build() { ++ println!("{:?}", result); ++} ++``` ++ ++See the documentation for `WalkBuilder` for many other options. diff --cc vendor/ignore-0.4.3/UNLICENSE index 000000000,000000000..68a49daad new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/UNLICENSE @@@ -1,0 -1,0 +1,24 @@@ ++This is free and unencumbered software released into the public domain. ++ ++Anyone is free to copy, modify, publish, use, compile, sell, or ++distribute this software, either in source code form or as a compiled ++binary, for any purpose, commercial or non-commercial, and by any ++means. 
++ ++In jurisdictions that recognize copyright laws, the author or authors ++of this software dedicate any and all copyright interest in the ++software to the public domain. We make this dedication for the benefit ++of the public at large and to the detriment of our heirs and ++successors. We intend this dedication to be an overt act of ++relinquishment in perpetuity of all present and future rights to this ++software under copyright law. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++OTHER DEALINGS IN THE SOFTWARE. ++ ++For more information, please refer to diff --cc vendor/ignore-0.4.3/examples/walk.rs index 000000000,000000000..0ff4ea94e new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/examples/walk.rs @@@ -1,0 -1,0 +1,92 @@@ ++#![allow(dead_code, unused_imports, unused_mut, unused_variables)] ++ ++extern crate crossbeam; ++extern crate ignore; ++extern crate walkdir; ++ ++use std::env; ++use std::io::{self, Write}; ++use std::path::Path; ++use std::sync::Arc; ++use std::sync::atomic::{AtomicUsize, Ordering}; ++use std::thread; ++ ++use crossbeam::sync::MsQueue; ++use ignore::WalkBuilder; ++use walkdir::WalkDir; ++ ++fn main() { ++ let mut path = env::args().nth(1).unwrap(); ++ let mut parallel = false; ++ let mut simple = false; ++ let queue: Arc>> = Arc::new(MsQueue::new()); ++ if path == "parallel" { ++ path = env::args().nth(2).unwrap(); ++ parallel = true; ++ } else if path == "walkdir" { ++ path = env::args().nth(2).unwrap(); ++ simple = true; ++ } ++ ++ let stdout_queue = queue.clone(); ++ let stdout_thread = thread::spawn(move || { ++ let mut stdout = io::BufWriter::new(io::stdout()); ++ while let Some(dent) = stdout_queue.pop() { ++ write_path(&mut stdout, dent.path()); ++ } ++ }); ++ ++ if parallel { ++ let walker = WalkBuilder::new(path).threads(6).build_parallel(); ++ walker.run(|| { ++ let queue = queue.clone(); ++ Box::new(move |result| { ++ use ignore::WalkState::*; ++ ++ queue.push(Some(DirEntry::Y(result.unwrap()))); ++ Continue ++ }) ++ }); ++ } else if simple { ++ let mut stdout = io::BufWriter::new(io::stdout()); ++ let walker = WalkDir::new(path); ++ for result in walker { ++ queue.push(Some(DirEntry::X(result.unwrap()))); ++ } ++ } else { ++ let mut stdout = io::BufWriter::new(io::stdout()); ++ let walker = WalkBuilder::new(path).build(); ++ for result in walker { ++ queue.push(Some(DirEntry::Y(result.unwrap()))); ++ } ++ } ++ queue.push(None); ++ stdout_thread.join().unwrap(); ++} ++ ++enum DirEntry { ++ X(walkdir::DirEntry), ++ Y(ignore::DirEntry), ++} ++ ++impl DirEntry { ++ fn path(&self) -> &Path { ++ match *self { ++ DirEntry::X(ref x) => x.path(), ++ DirEntry::Y(ref y) => y.path(), ++ } ++ } ++} ++ ++#[cfg(unix)] ++fn write_path(mut wtr: W, path: &Path) { ++ use std::os::unix::ffi::OsStrExt; ++ wtr.write(path.as_os_str().as_bytes()).unwrap(); ++ wtr.write(b"\n").unwrap(); ++} ++ ++#[cfg(not(unix))] ++fn write_path(mut wtr: W, path: &Path) { ++ wtr.write(path.to_string_lossy().as_bytes()).unwrap(); ++ wtr.write(b"\n").unwrap(); ++} diff --cc vendor/ignore-0.4.3/src/dir.rs index 000000000,000000000..162b4c035 new file mode 100644 --- /dev/null +++ 
b/vendor/ignore-0.4.3/src/dir.rs
@@@ -1,0 -1,0 +1,940 @@@
++// This module provides a data structure, `Ignore`, that connects "directory
++// traversal" with "ignore matchers." Specifically, it knows about gitignore
++// semantics and precedence, and is organized based on directory hierarchy.
++// Namely, every matcher logically corresponds to ignore rules from a single
++// directory, and points to the matcher for its corresponding parent directory.
++// In this sense, `Ignore` is a *persistent* data structure.
++//
++// This design was specifically chosen to make it possible to use this data
++// structure in a parallel directory iterator.
++//
++// My initial intention was to expose this module as part of this crate's
++// public API, but I think the data structure's public API is too complicated
++// with non-obvious failure modes. Alas, such things haven't been documented
++// well.
++
++use std::collections::HashMap;
++use std::ffi::{OsString, OsStr};
++use std::path::{Path, PathBuf};
++use std::sync::{Arc, RwLock};
++
++use gitignore::{self, Gitignore, GitignoreBuilder};
++use pathutil::{is_hidden, strip_prefix};
++use overrides::{self, Override};
++use types::{self, Types};
++use {Error, Match, PartialErrorBuilder};
++
++/// IgnoreMatch represents information about where a match came from when using
++/// the `Ignore` matcher.
++#[derive(Clone, Debug)]
++pub struct IgnoreMatch<'a>(IgnoreMatchInner<'a>);
++
++/// IgnoreMatchInner describes precisely where the match information came from.
++/// This is private to allow expansion to more matchers in the future.
++#[derive(Clone, Debug)]
++enum IgnoreMatchInner<'a> {
++    Override(overrides::Glob<'a>),
++    Gitignore(&'a gitignore::Glob),
++    Types(types::Glob<'a>),
++    Hidden,
++}
++
++impl<'a> IgnoreMatch<'a> {
++    fn overrides(x: overrides::Glob<'a>) -> IgnoreMatch<'a> {
++        IgnoreMatch(IgnoreMatchInner::Override(x))
++    }
++
++    fn gitignore(x: &'a gitignore::Glob) -> IgnoreMatch<'a> {
++        IgnoreMatch(IgnoreMatchInner::Gitignore(x))
++    }
++
++    fn types(x: types::Glob<'a>) -> IgnoreMatch<'a> {
++        IgnoreMatch(IgnoreMatchInner::Types(x))
++    }
++
++    fn hidden() -> IgnoreMatch<'static> {
++        IgnoreMatch(IgnoreMatchInner::Hidden)
++    }
++}
++
++/// Options for the ignore matcher, shared between the matcher itself and the
++/// builder.
++#[derive(Clone, Copy, Debug)]
++struct IgnoreOptions {
++    /// Whether to ignore hidden file paths or not.
++    hidden: bool,
++    /// Whether to read .ignore files.
++    ignore: bool,
++    /// Whether to respect any ignore files in parent directories.
++    parents: bool,
++    /// Whether to read git's global gitignore file.
++    git_global: bool,
++    /// Whether to read .gitignore files.
++    git_ignore: bool,
++    /// Whether to read .git/info/exclude files.
++    git_exclude: bool,
++}
++
++/// Ignore is a matcher useful for recursively walking one or more directories.
++#[derive(Clone, Debug)]
++pub struct Ignore(Arc<IgnoreInner>);
++
++#[derive(Clone, Debug)]
++struct IgnoreInner {
++    /// A map of all existing directories that have already been
++    /// compiled into matchers.
++    ///
++    /// Note that this is never used during matching, only when adding new
++    /// parent directory matchers. This avoids needing to rebuild glob sets for
++    /// parent directories if many paths are being searched.
++    compiled: Arc<RwLock<HashMap<OsString, Ignore>>>,
++    /// The path to the directory that this matcher was built from.
++    dir: PathBuf,
++    /// An override matcher (default is empty).
++    overrides: Arc<Override>,
++    /// A file type matcher.
++    types: Arc<Types>,
++    /// The parent directory to match next.
++    ///
++    /// If this is the root directory or there are otherwise no more
++    /// directories to match, then `parent` is `None`.
++    parent: Option<Ignore>,
++    /// Whether this is an absolute parent matcher, as added by add_parent.
++    is_absolute_parent: bool,
++    /// The absolute base path of this matcher. Populated only if parent
++    /// directories are added.
++    absolute_base: Option<Arc<PathBuf>>,
++    /// Explicit global ignore matchers specified by the caller.
++    explicit_ignores: Arc<Vec<Gitignore>>,
++    /// Ignore files used in addition to `.ignore`
++    custom_ignore_filenames: Arc<Vec<OsString>>,
++    /// The matcher for custom ignore files
++    custom_ignore_matcher: Gitignore,
++    /// The matcher for .ignore files.
++    ignore_matcher: Gitignore,
++    /// A global gitignore matcher, usually from $XDG_CONFIG_HOME/git/ignore.
++    git_global_matcher: Arc<Gitignore>,
++    /// The matcher for .gitignore files.
++    git_ignore_matcher: Gitignore,
++    /// Special matcher for `.git/info/exclude` files.
++    git_exclude_matcher: Gitignore,
++    /// Whether this directory contains a .git sub-directory.
++    has_git: bool,
++    /// Ignore config.
++    opts: IgnoreOptions,
++}
++
++impl Ignore {
++    /// Return the directory path of this matcher.
++    pub fn path(&self) -> &Path {
++        &self.0.dir
++    }
++
++    /// Return true if this matcher has no parent.
++    pub fn is_root(&self) -> bool {
++        self.0.parent.is_none()
++    }
++
++    /// Returns true if this matcher was added via the `add_parents` method.
++    pub fn is_absolute_parent(&self) -> bool {
++        self.0.is_absolute_parent
++    }
++
++    /// Return this matcher's parent, if one exists.
++    pub fn parent(&self) -> Option<Ignore> {
++        self.0.parent.clone()
++    }
++
++    /// Create a new `Ignore` matcher with the parent directories of `dir`.
++    ///
++    /// Note that this can only be called on an `Ignore` matcher with no
++    /// parents (i.e., `is_root` returns `true`). This will panic otherwise.
++    pub fn add_parents<P: AsRef<Path>>(
++        &self,
++        path: P,
++    ) -> (Ignore, Option<Error>) {
++        if !self.0.opts.parents
++            && !self.0.opts.git_ignore
++            && !self.0.opts.git_exclude
++            && !self.0.opts.git_global
++        {
++            // If we never need info from parent directories, then don't do
++            // anything.
++            return (self.clone(), None);
++        }
++        if !self.is_root() {
++            panic!("Ignore::add_parents called on non-root matcher");
++        }
++        let absolute_base = match path.as_ref().canonicalize() {
++            Ok(path) => Arc::new(path),
++            Err(_) => {
++                // There's not much we can do here, so just return our
++                // existing matcher. We drop the error to be consistent
++                // with our general pattern of ignoring I/O errors when
++                // processing ignore files.
++                return (self.clone(), None);
++            }
++        };
++        // List of parents, from child to root.
++        let mut parents = vec![];
++        let mut path = &**absolute_base;
++        while let Some(parent) = path.parent() {
++            parents.push(parent);
++            path = parent;
++        }
++        let mut errs = PartialErrorBuilder::default();
++        let mut ig = self.clone();
++        for parent in parents.into_iter().rev() {
++            let mut compiled = self.0.compiled.write().unwrap();
++            if let Some(prebuilt) = compiled.get(parent.as_os_str()) {
++                ig = prebuilt.clone();
++                continue;
++            }
++            let (mut igtmp, err) = ig.add_child_path(parent);
++            errs.maybe_push(err);
++            igtmp.is_absolute_parent = true;
++            igtmp.absolute_base = Some(absolute_base.clone());
++            igtmp.has_git = parent.join(".git").exists();
++            ig = Ignore(Arc::new(igtmp));
++            compiled.insert(parent.as_os_str().to_os_string(), ig.clone());
++        }
++        (ig, errs.into_error_option())
++    }
++
++    /// Create a new `Ignore` matcher for the given child directory.
++ /// ++ /// Since building the matcher may require reading from multiple ++ /// files, it's possible that this method partially succeeds. Therefore, ++ /// a matcher is always returned (which may match nothing) and an error is ++ /// returned if it exists. ++ /// ++ /// Note that all I/O errors are completely ignored. ++ pub fn add_child>( ++ &self, ++ dir: P, ++ ) -> (Ignore, Option) { ++ let (ig, err) = self.add_child_path(dir.as_ref()); ++ (Ignore(Arc::new(ig)), err) ++ } ++ ++ /// Like add_child, but takes a full path and returns an IgnoreInner. ++ fn add_child_path(&self, dir: &Path) -> (IgnoreInner, Option) { ++ let mut errs = PartialErrorBuilder::default(); ++ let custom_ig_matcher = ++ if self.0.custom_ignore_filenames.is_empty() { ++ Gitignore::empty() ++ } else { ++ let (m, err) = ++ create_gitignore(&dir, &self.0.custom_ignore_filenames); ++ errs.maybe_push(err); ++ m ++ }; ++ let ig_matcher = ++ if !self.0.opts.ignore { ++ Gitignore::empty() ++ } else { ++ let (m, err) = create_gitignore(&dir, &[".ignore"]); ++ errs.maybe_push(err); ++ m ++ }; ++ let gi_matcher = ++ if !self.0.opts.git_ignore { ++ Gitignore::empty() ++ } else { ++ let (m, err) = create_gitignore(&dir, &[".gitignore"]); ++ errs.maybe_push(err); ++ m ++ }; ++ let gi_exclude_matcher = ++ if !self.0.opts.git_exclude { ++ Gitignore::empty() ++ } else { ++ let (m, err) = create_gitignore(&dir, &[".git/info/exclude"]); ++ errs.maybe_push(err); ++ m ++ }; ++ let ig = IgnoreInner { ++ compiled: self.0.compiled.clone(), ++ dir: dir.to_path_buf(), ++ overrides: self.0.overrides.clone(), ++ types: self.0.types.clone(), ++ parent: Some(self.clone()), ++ is_absolute_parent: false, ++ absolute_base: self.0.absolute_base.clone(), ++ explicit_ignores: self.0.explicit_ignores.clone(), ++ custom_ignore_filenames: self.0.custom_ignore_filenames.clone(), ++ custom_ignore_matcher: custom_ig_matcher, ++ ignore_matcher: ig_matcher, ++ git_global_matcher: self.0.git_global_matcher.clone(), ++ git_ignore_matcher: gi_matcher, ++ git_exclude_matcher: gi_exclude_matcher, ++ has_git: dir.join(".git").exists(), ++ opts: self.0.opts, ++ }; ++ (ig, errs.into_error_option()) ++ } ++ ++ /// Returns true if at least one type of ignore rule should be matched. ++ fn has_any_ignore_rules(&self) -> bool { ++ let opts = self.0.opts; ++ let has_custom_ignore_files = !self.0.custom_ignore_filenames.is_empty(); ++ let has_explicit_ignores = !self.0.explicit_ignores.is_empty(); ++ ++ opts.ignore || opts.git_global || opts.git_ignore ++ || opts.git_exclude || has_custom_ignore_files ++ || has_explicit_ignores ++ } ++ ++ /// Returns a match indicating whether the given file path should be ++ /// ignored or not. ++ /// ++ /// The match contains information about its origin. ++ pub fn matched<'a, P: AsRef>( ++ &'a self, ++ path: P, ++ is_dir: bool, ++ ) -> Match> { ++ // We need to be careful with our path. If it has a leading ./, then ++ // strip it because it causes nothing but trouble. ++ let mut path = path.as_ref(); ++ if let Some(p) = strip_prefix("./", path) { ++ path = p; ++ } ++ // Match against the override patterns. If an override matches ++ // regardless of whether it's whitelist/ignore, then we quit and ++ // return that result immediately. Overrides have the highest ++ // precedence. 
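++        //
++        // As a sketch of the overall order below: overrides are checked
++        // first, then ignore-file rules, then file type matchers, and
++        // finally the hidden-file check, which has the lowest precedence.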
++ if !self.0.overrides.is_empty() { ++ let mat = ++ self.0.overrides.matched(path, is_dir) ++ .map(IgnoreMatch::overrides); ++ if !mat.is_none() { ++ return mat; ++ } ++ } ++ let mut whitelisted = Match::None; ++ if self.has_any_ignore_rules() { ++ let mat = self.matched_ignore(path, is_dir); ++ if mat.is_ignore() { ++ return mat; ++ } else if mat.is_whitelist() { ++ whitelisted = mat; ++ } ++ } ++ if !self.0.types.is_empty() { ++ let mat = ++ self.0.types.matched(path, is_dir).map(IgnoreMatch::types); ++ if mat.is_ignore() { ++ return mat; ++ } else if mat.is_whitelist() { ++ whitelisted = mat; ++ } ++ } ++ if whitelisted.is_none() && self.0.opts.hidden && is_hidden(path) { ++ return Match::Ignore(IgnoreMatch::hidden()); ++ } ++ whitelisted ++ } ++ ++ /// Performs matching only on the ignore files for this directory and ++ /// all parent directories. ++ fn matched_ignore<'a>( ++ &'a self, ++ path: &Path, ++ is_dir: bool, ++ ) -> Match> { ++ let (mut m_custom_ignore, mut m_ignore, mut m_gi, mut m_gi_exclude, mut m_explicit) = ++ (Match::None, Match::None, Match::None, Match::None, Match::None); ++ let any_git = self.parents().any(|ig| ig.0.has_git); ++ let mut saw_git = false; ++ for ig in self.parents().take_while(|ig| !ig.0.is_absolute_parent) { ++ if m_custom_ignore.is_none() { ++ m_custom_ignore = ++ ig.0.custom_ignore_matcher.matched(path, is_dir) ++ .map(IgnoreMatch::gitignore); ++ } ++ if m_ignore.is_none() { ++ m_ignore = ++ ig.0.ignore_matcher.matched(path, is_dir) ++ .map(IgnoreMatch::gitignore); ++ } ++ if any_git && !saw_git && m_gi.is_none() { ++ m_gi = ++ ig.0.git_ignore_matcher.matched(path, is_dir) ++ .map(IgnoreMatch::gitignore); ++ } ++ if any_git && !saw_git && m_gi_exclude.is_none() { ++ m_gi_exclude = ++ ig.0.git_exclude_matcher.matched(path, is_dir) ++ .map(IgnoreMatch::gitignore); ++ } ++ saw_git = saw_git || ig.0.has_git; ++ } ++ if self.0.opts.parents { ++ if let Some(abs_parent_path) = self.absolute_base() { ++ let path = abs_parent_path.join(path); ++ for ig in self.parents().skip_while(|ig|!ig.0.is_absolute_parent) { ++ if m_custom_ignore.is_none() { ++ m_custom_ignore = ++ ig.0.custom_ignore_matcher.matched(&path, is_dir) ++ .map(IgnoreMatch::gitignore); ++ } ++ if m_ignore.is_none() { ++ m_ignore = ++ ig.0.ignore_matcher.matched(&path, is_dir) ++ .map(IgnoreMatch::gitignore); ++ } ++ if any_git && !saw_git && m_gi.is_none() { ++ m_gi = ++ ig.0.git_ignore_matcher.matched(&path, is_dir) ++ .map(IgnoreMatch::gitignore); ++ } ++ if any_git && !saw_git && m_gi_exclude.is_none() { ++ m_gi_exclude = ++ ig.0.git_exclude_matcher.matched(&path, is_dir) ++ .map(IgnoreMatch::gitignore); ++ } ++ saw_git = saw_git || ig.0.has_git; ++ } ++ } ++ } ++ for gi in self.0.explicit_ignores.iter().rev() { ++ if !m_explicit.is_none() { ++ break; ++ } ++ m_explicit = gi.matched(&path, is_dir).map(IgnoreMatch::gitignore); ++ } ++ let m_global = ++ if any_git { ++ self.0.git_global_matcher ++ .matched(&path, is_dir) ++ .map(IgnoreMatch::gitignore) ++ } else { ++ Match::None ++ }; ++ ++ m_custom_ignore.or(m_ignore).or(m_gi).or(m_gi_exclude).or(m_global).or(m_explicit) ++ } ++ ++ /// Returns an iterator over parent ignore matchers, including this one. ++ pub fn parents(&self) -> Parents { ++ Parents(Some(self)) ++ } ++ ++ /// Returns the first absolute path of the first absolute parent, if ++ /// one exists. ++ fn absolute_base(&self) -> Option<&Path> { ++ self.0.absolute_base.as_ref().map(|p| &***p) ++ } ++} ++ ++/// An iterator over all parents of an ignore matcher, including itself. 
++/// ++/// The lifetime `'a` refers to the lifetime of the initial `Ignore` matcher. ++pub struct Parents<'a>(Option<&'a Ignore>); ++ ++impl<'a> Iterator for Parents<'a> { ++ type Item = &'a Ignore; ++ ++ fn next(&mut self) -> Option<&'a Ignore> { ++ match self.0.take() { ++ None => None, ++ Some(ig) => { ++ self.0 = ig.0.parent.as_ref(); ++ Some(ig) ++ } ++ } ++ } ++} ++ ++/// A builder for creating an Ignore matcher. ++#[derive(Clone, Debug)] ++pub struct IgnoreBuilder { ++ /// The root directory path for this ignore matcher. ++ dir: PathBuf, ++ /// An override matcher (default is empty). ++ overrides: Arc, ++ /// A type matcher (default is empty). ++ types: Arc, ++ /// Explicit global ignore matchers. ++ explicit_ignores: Vec, ++ /// Ignore files in addition to .ignore. ++ custom_ignore_filenames: Vec, ++ /// Ignore config. ++ opts: IgnoreOptions, ++} ++ ++impl IgnoreBuilder { ++ /// Create a new builder for an `Ignore` matcher. ++ /// ++ /// All relative file paths are resolved with respect to the current ++ /// working directory. ++ pub fn new() -> IgnoreBuilder { ++ IgnoreBuilder { ++ dir: Path::new("").to_path_buf(), ++ overrides: Arc::new(Override::empty()), ++ types: Arc::new(Types::empty()), ++ explicit_ignores: vec![], ++ custom_ignore_filenames: vec![], ++ opts: IgnoreOptions { ++ hidden: true, ++ ignore: true, ++ parents: true, ++ git_global: true, ++ git_ignore: true, ++ git_exclude: true, ++ }, ++ } ++ } ++ ++ /// Builds a new `Ignore` matcher. ++ /// ++ /// The matcher returned won't match anything until ignore rules from ++ /// directories are added to it. ++ pub fn build(&self) -> Ignore { ++ let git_global_matcher = ++ if !self.opts.git_global { ++ Gitignore::empty() ++ } else { ++ let (gi, err) = Gitignore::global(); ++ if let Some(err) = err { ++ debug!("{}", err); ++ } ++ gi ++ }; ++ ++ Ignore(Arc::new(IgnoreInner { ++ compiled: Arc::new(RwLock::new(HashMap::new())), ++ dir: self.dir.clone(), ++ overrides: self.overrides.clone(), ++ types: self.types.clone(), ++ parent: None, ++ is_absolute_parent: true, ++ absolute_base: None, ++ explicit_ignores: Arc::new(self.explicit_ignores.clone()), ++ custom_ignore_filenames: Arc::new(self.custom_ignore_filenames.clone()), ++ custom_ignore_matcher: Gitignore::empty(), ++ ignore_matcher: Gitignore::empty(), ++ git_global_matcher: Arc::new(git_global_matcher), ++ git_ignore_matcher: Gitignore::empty(), ++ git_exclude_matcher: Gitignore::empty(), ++ has_git: false, ++ opts: self.opts, ++ })) ++ } ++ ++ /// Add an override matcher. ++ /// ++ /// By default, no override matcher is used. ++ /// ++ /// This overrides any previous setting. ++ pub fn overrides(&mut self, overrides: Override) -> &mut IgnoreBuilder { ++ self.overrides = Arc::new(overrides); ++ self ++ } ++ ++ /// Add a file type matcher. ++ /// ++ /// By default, no file type matcher is used. ++ /// ++ /// This overrides any previous setting. ++ pub fn types(&mut self, types: Types) -> &mut IgnoreBuilder { ++ self.types = Arc::new(types); ++ self ++ } ++ ++ /// Adds a new global ignore matcher from the ignore file path given. ++ pub fn add_ignore(&mut self, ig: Gitignore) -> &mut IgnoreBuilder { ++ self.explicit_ignores.push(ig); ++ self ++ } ++ ++ /// Add a custom ignore file name ++ /// ++ /// These ignore files have higher precedence than all other ignore files. ++ /// ++ /// When specifying multiple names, earlier names have lower precedence than ++ /// later names. 
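++    ///
++    /// A minimal usage sketch (the file name is hypothetical):
++    ///
++    /// ```ignore
++    /// let mut builder = IgnoreBuilder::new();
++    /// builder.add_custom_ignore_filename(".myignore");
++    /// let ig = builder.build();
++    /// ```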
++ pub fn add_custom_ignore_filename>( ++ &mut self, ++ file_name: S ++ ) -> &mut IgnoreBuilder { ++ self.custom_ignore_filenames.push(file_name.as_ref().to_os_string()); ++ self ++ } ++ ++ /// Enables ignoring hidden files. ++ /// ++ /// This is enabled by default. ++ pub fn hidden(&mut self, yes: bool) -> &mut IgnoreBuilder { ++ self.opts.hidden = yes; ++ self ++ } ++ ++ /// Enables reading `.ignore` files. ++ /// ++ /// `.ignore` files have the same semantics as `gitignore` files and are ++ /// supported by search tools such as ripgrep and The Silver Searcher. ++ /// ++ /// This is enabled by default. ++ pub fn ignore(&mut self, yes: bool) -> &mut IgnoreBuilder { ++ self.opts.ignore = yes; ++ self ++ } ++ ++ /// Enables reading ignore files from parent directories. ++ /// ++ /// If this is enabled, then .gitignore files in parent directories of each ++ /// file path given are respected. Otherwise, they are ignored. ++ /// ++ /// This is enabled by default. ++ pub fn parents(&mut self, yes: bool) -> &mut IgnoreBuilder { ++ self.opts.parents = yes; ++ self ++ } ++ ++ /// Add a global gitignore matcher. ++ /// ++ /// Its precedence is lower than both normal `.gitignore` files and ++ /// `.git/info/exclude` files. ++ /// ++ /// This overwrites any previous global gitignore setting. ++ /// ++ /// This is enabled by default. ++ pub fn git_global(&mut self, yes: bool) -> &mut IgnoreBuilder { ++ self.opts.git_global = yes; ++ self ++ } ++ ++ /// Enables reading `.gitignore` files. ++ /// ++ /// `.gitignore` files have match semantics as described in the `gitignore` ++ /// man page. ++ /// ++ /// This is enabled by default. ++ pub fn git_ignore(&mut self, yes: bool) -> &mut IgnoreBuilder { ++ self.opts.git_ignore = yes; ++ self ++ } ++ ++ /// Enables reading `.git/info/exclude` files. ++ /// ++ /// `.git/info/exclude` files have match semantics as described in the ++ /// `gitignore` man page. ++ /// ++ /// This is enabled by default. ++ pub fn git_exclude(&mut self, yes: bool) -> &mut IgnoreBuilder { ++ self.opts.git_exclude = yes; ++ self ++ } ++} ++ ++/// Creates a new gitignore matcher for the directory given. ++/// ++/// Ignore globs are extracted from each of the file names in `dir` in the ++/// order given (earlier names have lower precedence than later names). ++/// ++/// I/O errors are ignored. 
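++///
++/// A minimal sketch (the directory and extra file name are illustrative):
++///
++/// ```ignore
++/// let (gi, err) = create_gitignore(Path::new("/repo"), &[".ignore", ".myignore"]);
++/// // `gi` matches whatever globs parsed, even if `err` reports a partial failure.
++/// ```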
++pub fn create_gitignore>( ++ dir: &Path, ++ names: &[T], ++) -> (Gitignore, Option) { ++ let mut builder = GitignoreBuilder::new(dir); ++ let mut errs = PartialErrorBuilder::default(); ++ for name in names { ++ let gipath = dir.join(name.as_ref()); ++ errs.maybe_push_ignore_io(builder.add(gipath)); ++ } ++ let gi = match builder.build() { ++ Ok(gi) => gi, ++ Err(err) => { ++ errs.push(err); ++ GitignoreBuilder::new(dir).build().unwrap() ++ } ++ }; ++ (gi, errs.into_error_option()) ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::fs::{self, File}; ++ use std::io::Write; ++ use std::path::Path; ++ ++ use tempdir::TempDir; ++ ++ use dir::IgnoreBuilder; ++ use gitignore::Gitignore; ++ use Error; ++ ++ fn wfile>(path: P, contents: &str) { ++ let mut file = File::create(path).unwrap(); ++ file.write_all(contents.as_bytes()).unwrap(); ++ } ++ ++ fn mkdirp>(path: P) { ++ fs::create_dir_all(path).unwrap(); ++ } ++ ++ fn partial(err: Error) -> Vec { ++ match err { ++ Error::Partial(errs) => errs, ++ _ => panic!("expected partial error but got {:?}", err), ++ } ++ } ++ ++ #[test] ++ fn explicit_ignore() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ wfile(td.path().join("not-an-ignore"), "foo\n!bar"); ++ ++ let (gi, err) = Gitignore::new(td.path().join("not-an-ignore")); ++ assert!(err.is_none()); ++ let (ig, err) = IgnoreBuilder::new() ++ .add_ignore(gi).build().add_child(td.path()); ++ assert!(err.is_none()); ++ assert!(ig.matched("foo", false).is_ignore()); ++ assert!(ig.matched("bar", false).is_whitelist()); ++ assert!(ig.matched("baz", false).is_none()); ++ } ++ ++ #[test] ++ fn git_exclude() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ mkdirp(td.path().join(".git/info")); ++ wfile(td.path().join(".git/info/exclude"), "foo\n!bar"); ++ ++ let (ig, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_none()); ++ assert!(ig.matched("foo", false).is_ignore()); ++ assert!(ig.matched("bar", false).is_whitelist()); ++ assert!(ig.matched("baz", false).is_none()); ++ } ++ ++ #[test] ++ fn gitignore() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ mkdirp(td.path().join(".git")); ++ wfile(td.path().join(".gitignore"), "foo\n!bar"); ++ ++ let (ig, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_none()); ++ assert!(ig.matched("foo", false).is_ignore()); ++ assert!(ig.matched("bar", false).is_whitelist()); ++ assert!(ig.matched("baz", false).is_none()); ++ } ++ ++ #[test] ++ fn gitignore_no_git() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ wfile(td.path().join(".gitignore"), "foo\n!bar"); ++ ++ let (ig, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_none()); ++ assert!(ig.matched("foo", false).is_none()); ++ assert!(ig.matched("bar", false).is_none()); ++ assert!(ig.matched("baz", false).is_none()); ++ } ++ ++ #[test] ++ fn ignore() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ wfile(td.path().join(".ignore"), "foo\n!bar"); ++ ++ let (ig, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_none()); ++ assert!(ig.matched("foo", false).is_ignore()); ++ assert!(ig.matched("bar", false).is_whitelist()); ++ assert!(ig.matched("baz", false).is_none()); ++ } ++ ++ #[test] ++ fn custom_ignore() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ let custom_ignore = ".customignore"; ++ wfile(td.path().join(custom_ignore), "foo\n!bar"); ++ ++ let (ig, err) = IgnoreBuilder::new() ++ .add_custom_ignore_filename(custom_ignore) ++ .build().add_child(td.path()); ++ 
assert!(err.is_none());
++        assert!(ig.matched("foo", false).is_ignore());
++        assert!(ig.matched("bar", false).is_whitelist());
++        assert!(ig.matched("baz", false).is_none());
++    }
++
++    // Tests that a custom ignore file will override an .ignore.
++    #[test]
++    fn custom_ignore_over_ignore() {
++        let td = TempDir::new("ignore-test-").unwrap();
++        let custom_ignore = ".customignore";
++        wfile(td.path().join(".ignore"), "foo");
++        wfile(td.path().join(custom_ignore), "!foo");
++
++        let (ig, err) = IgnoreBuilder::new()
++            .add_custom_ignore_filename(custom_ignore)
++            .build().add_child(td.path());
++        assert!(err.is_none());
++        assert!(ig.matched("foo", false).is_whitelist());
++    }
++
++    // Tests that earlier custom ignore files have lower precedence than later.
++    #[test]
++    fn custom_ignore_precedence() {
++        let td = TempDir::new("ignore-test-").unwrap();
++        let custom_ignore1 = ".customignore1";
++        let custom_ignore2 = ".customignore2";
++        wfile(td.path().join(custom_ignore1), "foo");
++        wfile(td.path().join(custom_ignore2), "!foo");
++
++        let (ig, err) = IgnoreBuilder::new()
++            .add_custom_ignore_filename(custom_ignore1)
++            .add_custom_ignore_filename(custom_ignore2)
++            .build().add_child(td.path());
++        assert!(err.is_none());
++        assert!(ig.matched("foo", false).is_whitelist());
++    }
++
++    // Tests that an .ignore will override a .gitignore.
++    #[test]
++    fn ignore_over_gitignore() {
++        let td = TempDir::new("ignore-test-").unwrap();
++        wfile(td.path().join(".gitignore"), "foo");
++        wfile(td.path().join(".ignore"), "!foo");
++
++        let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
++        assert!(err.is_none());
++        assert!(ig.matched("foo", false).is_whitelist());
++    }
++
++    // Tests that exclude has lower precedence than both .ignore and .gitignore.
++ #[test] ++ fn exclude_lowest() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ wfile(td.path().join(".gitignore"), "!foo"); ++ wfile(td.path().join(".ignore"), "!bar"); ++ mkdirp(td.path().join(".git/info")); ++ wfile(td.path().join(".git/info/exclude"), "foo\nbar\nbaz"); ++ ++ let (ig, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_none()); ++ assert!(ig.matched("baz", false).is_ignore()); ++ assert!(ig.matched("foo", false).is_whitelist()); ++ assert!(ig.matched("bar", false).is_whitelist()); ++ } ++ ++ #[test] ++ fn errored() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ wfile(td.path().join(".gitignore"), "f**oo"); ++ ++ let (_, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_some()); ++ } ++ ++ #[test] ++ fn errored_both() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ wfile(td.path().join(".gitignore"), "f**oo"); ++ wfile(td.path().join(".ignore"), "fo**o"); ++ ++ let (_, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert_eq!(2, partial(err.expect("an error")).len()); ++ } ++ ++ #[test] ++ fn errored_partial() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ mkdirp(td.path().join(".git")); ++ wfile(td.path().join(".gitignore"), "f**oo\nbar"); ++ ++ let (ig, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_some()); ++ assert!(ig.matched("bar", false).is_ignore()); ++ } ++ ++ #[test] ++ fn errored_partial_and_ignore() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ wfile(td.path().join(".gitignore"), "f**oo\nbar"); ++ wfile(td.path().join(".ignore"), "!bar"); ++ ++ let (ig, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_some()); ++ assert!(ig.matched("bar", false).is_whitelist()); ++ } ++ ++ #[test] ++ fn not_present_empty() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ ++ let (_, err) = IgnoreBuilder::new().build().add_child(td.path()); ++ assert!(err.is_none()); ++ } ++ ++ #[test] ++ fn stops_at_git_dir() { ++ // This tests that .gitignore files beyond a .git barrier aren't ++ // matched, but .ignore files are. ++ let td = TempDir::new("ignore-test-").unwrap(); ++ mkdirp(td.path().join(".git")); ++ mkdirp(td.path().join("foo/.git")); ++ wfile(td.path().join(".gitignore"), "foo"); ++ wfile(td.path().join(".ignore"), "bar"); ++ ++ let ig0 = IgnoreBuilder::new().build(); ++ let (ig1, err) = ig0.add_child(td.path()); ++ assert!(err.is_none()); ++ let (ig2, err) = ig1.add_child(ig1.path().join("foo")); ++ assert!(err.is_none()); ++ ++ assert!(ig1.matched("foo", false).is_ignore()); ++ assert!(ig2.matched("foo", false).is_none()); ++ ++ assert!(ig1.matched("bar", false).is_ignore()); ++ assert!(ig2.matched("bar", false).is_ignore()); ++ } ++ ++ #[test] ++ fn absolute_parent() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ mkdirp(td.path().join(".git")); ++ mkdirp(td.path().join("foo")); ++ wfile(td.path().join(".gitignore"), "bar"); ++ ++ // First, check that the parent gitignore file isn't detected if the ++ // parent isn't added. This establishes a baseline. ++ let ig0 = IgnoreBuilder::new().build(); ++ let (ig1, err) = ig0.add_child(td.path().join("foo")); ++ assert!(err.is_none()); ++ assert!(ig1.matched("bar", false).is_none()); ++ ++ // Second, check that adding a parent directory actually works. 
++ let ig0 = IgnoreBuilder::new().build(); ++ let (ig1, err) = ig0.add_parents(td.path().join("foo")); ++ assert!(err.is_none()); ++ let (ig2, err) = ig1.add_child(td.path().join("foo")); ++ assert!(err.is_none()); ++ assert!(ig2.matched("bar", false).is_ignore()); ++ } ++ ++ #[test] ++ fn absolute_parent_anchored() { ++ let td = TempDir::new("ignore-test-").unwrap(); ++ mkdirp(td.path().join(".git")); ++ mkdirp(td.path().join("src/llvm")); ++ wfile(td.path().join(".gitignore"), "/llvm/\nfoo"); ++ ++ let ig0 = IgnoreBuilder::new().build(); ++ let (ig1, err) = ig0.add_parents(td.path().join("src")); ++ assert!(err.is_none()); ++ let (ig2, err) = ig1.add_child("src"); ++ assert!(err.is_none()); ++ ++ assert!(ig1.matched("llvm", true).is_none()); ++ assert!(ig2.matched("llvm", true).is_none()); ++ assert!(ig2.matched("src/llvm", true).is_none()); ++ assert!(ig2.matched("foo", false).is_ignore()); ++ assert!(ig2.matched("src/foo", false).is_ignore()); ++ } ++} diff --cc vendor/ignore-0.4.3/src/gitignore.rs index 000000000,000000000..2a3016b80 new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/src/gitignore.rs @@@ -1,0 -1,0 +1,761 @@@ ++/*! ++The gitignore module provides a way to match globs from a gitignore file ++against file paths. ++ ++Note that this module implements the specification as described in the ++`gitignore` man page from scratch. That is, this module does *not* shell out to ++the `git` command line tool. ++*/ ++ ++use std::cell::RefCell; ++use std::env; ++use std::fs::File; ++use std::io::{self, BufRead, Read}; ++use std::path::{Path, PathBuf}; ++use std::str; ++use std::sync::Arc; ++ ++use globset::{Candidate, GlobBuilder, GlobSet, GlobSetBuilder}; ++use regex::bytes::Regex; ++use thread_local::ThreadLocal; ++ ++use pathutil::{is_file_name, strip_prefix}; ++use {Error, Match, PartialErrorBuilder}; ++ ++/// Glob represents a single glob in a gitignore file. ++/// ++/// This is used to report information about the highest precedent glob that ++/// matched in one or more gitignore files. ++#[derive(Clone, Debug)] ++pub struct Glob { ++ /// The file path that this glob was extracted from. ++ from: Option, ++ /// The original glob string. ++ original: String, ++ /// The actual glob string used to convert to a regex. ++ actual: String, ++ /// Whether this is a whitelisted glob or not. ++ is_whitelist: bool, ++ /// Whether this glob should only match directories or not. ++ is_only_dir: bool, ++} ++ ++impl Glob { ++ /// Returns the file path that defined this glob. ++ pub fn from(&self) -> Option<&Path> { ++ self.from.as_ref().map(|p| &**p) ++ } ++ ++ /// The original glob as it was defined in a gitignore file. ++ pub fn original(&self) -> &str { ++ &self.original ++ } ++ ++ /// The actual glob that was compiled to respect gitignore ++ /// semantics. ++ pub fn actual(&self) -> &str { ++ &self.actual ++ } ++ ++ /// Whether this was a whitelisted glob or not. ++ pub fn is_whitelist(&self) -> bool { ++ self.is_whitelist ++ } ++ ++ /// Whether this glob must match a directory or not. ++ pub fn is_only_dir(&self) -> bool { ++ self.is_only_dir ++ } ++ ++ /// Returns true if and only if this glob has a `**/` prefix. ++ fn has_doublestar_prefix(&self) -> bool { ++ self.actual.starts_with("**/") ++ || (self.actual == "**" && self.is_only_dir) ++ } ++} ++ ++/// Gitignore is a matcher for the globs in one or more gitignore files ++/// in the same directory. 
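++///
++/// A minimal usage sketch, assuming a `.gitignore` that contains the line
++/// `target/` (the path below is hypothetical):
++///
++/// ```rust,no_run
++/// use ignore::gitignore::Gitignore;
++///
++/// let (gi, err) = Gitignore::new("/repo/.gitignore");
++/// assert!(err.is_none());
++/// assert!(gi.matched("target", true).is_ignore());
++/// ```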
++#[derive(Clone, Debug)]
++pub struct Gitignore {
++    set: GlobSet,
++    root: PathBuf,
++    globs: Vec<Glob>,
++    num_ignores: u64,
++    num_whitelists: u64,
++    matches: Option<Arc<ThreadLocal<RefCell<Vec<usize>>>>>,
++}
++
++impl Gitignore {
++    /// Creates a new gitignore matcher from the gitignore file path given.
++    ///
++    /// If it's desirable to include multiple gitignore files in a single
++    /// matcher, or read gitignore globs from a different source, then
++    /// use `GitignoreBuilder`.
++    ///
++    /// This always returns a valid matcher, even if it's empty. In particular,
++    /// a Gitignore file can be partially valid, e.g., when one glob is invalid
++    /// but the rest aren't.
++    ///
++    /// Note that I/O errors are ignored. For more granular control over
++    /// errors, use `GitignoreBuilder`.
++    pub fn new<P: AsRef<Path>>(
++        gitignore_path: P,
++    ) -> (Gitignore, Option<Error>) {
++        let path = gitignore_path.as_ref();
++        let parent = path.parent().unwrap_or(Path::new("/"));
++        let mut builder = GitignoreBuilder::new(parent);
++        let mut errs = PartialErrorBuilder::default();
++        errs.maybe_push_ignore_io(builder.add(path));
++        match builder.build() {
++            Ok(gi) => (gi, errs.into_error_option()),
++            Err(err) => {
++                errs.push(err);
++                (Gitignore::empty(), errs.into_error_option())
++            }
++        }
++    }
++
++    /// Creates a new gitignore matcher from the global ignore file, if one
++    /// exists.
++    ///
++    /// The global config file path is specified by git's `core.excludesFile`
++    /// config option.
++    ///
++    /// Git's config file location is `$HOME/.gitconfig`. If `$HOME/.gitconfig`
++    /// does not exist or does not specify `core.excludesFile`, then
++    /// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
++    /// set or is empty, then `$HOME/.config/git/ignore` is used instead.
++    pub fn global() -> (Gitignore, Option<Error>) {
++        match gitconfig_excludes_path() {
++            None => (Gitignore::empty(), None),
++            Some(path) => {
++                if !path.is_file() {
++                    (Gitignore::empty(), None)
++                } else {
++                    Gitignore::new(path)
++                }
++            }
++        }
++    }
++
++    /// Creates a new empty gitignore matcher that never matches anything.
++    ///
++    /// Its path is empty.
++    pub fn empty() -> Gitignore {
++        Gitignore {
++            set: GlobSet::empty(),
++            root: PathBuf::from(""),
++            globs: vec![],
++            num_ignores: 0,
++            num_whitelists: 0,
++            matches: None,
++        }
++    }
++
++    /// Returns the directory containing this gitignore matcher.
++    ///
++    /// All matches are done relative to this path.
++    pub fn path(&self) -> &Path {
++        &*self.root
++    }
++
++    /// Returns true if and only if this gitignore has zero globs, and
++    /// therefore never matches any file path.
++    pub fn is_empty(&self) -> bool {
++        self.set.is_empty()
++    }
++
++    /// Returns the total number of globs, which should be equivalent to
++    /// `num_ignores + num_whitelists`.
++    pub fn len(&self) -> usize {
++        self.set.len()
++    }
++
++    /// Returns the total number of ignore globs.
++    pub fn num_ignores(&self) -> u64 {
++        self.num_ignores
++    }
++
++    /// Returns the total number of whitelisted globs.
++    pub fn num_whitelists(&self) -> u64 {
++        self.num_whitelists
++    }
++
++    /// Returns whether the given path (file or directory) matched a pattern in
++    /// this gitignore matcher.
++    ///
++    /// `is_dir` should be true if the path refers to a directory and false
++    /// otherwise.
++    ///
++    /// The given path is matched relative to the path given when building
++    /// the matcher. Specifically, before matching `path`, its prefix (as
++    /// determined by a common suffix of the directory containing this
++    /// gitignore) is stripped. If there is no common suffix/prefix overlap,
++    /// then `path` is assumed to be relative to this matcher.
++    pub fn matched<P: AsRef<Path>>(
++        &self,
++        path: P,
++        is_dir: bool,
++    ) -> Match<&Glob> {
++        if self.is_empty() {
++            return Match::None;
++        }
++        self.matched_stripped(self.strip(path.as_ref()), is_dir)
++    }
++
++    /// Returns whether the given path (file or directory, and expected to be
++    /// under the root) or any of its parent directories (up to the root)
++    /// matched a pattern in this gitignore matcher.
++    ///
++    /// NOTE: This method is more expensive than walking the directory
++    /// hierarchy top-to-bottom and matching the entries. But it is easier to
++    /// use in cases when a list of paths is available without a hierarchy.
++    ///
++    /// `is_dir` should be true if the path refers to a directory and false
++    /// otherwise.
++    ///
++    /// The given path is matched relative to the path given when building
++    /// the matcher. Specifically, before matching `path`, its prefix (as
++    /// determined by a common suffix of the directory containing this
++    /// gitignore) is stripped. If there is no common suffix/prefix overlap,
++    /// then `path` is assumed to be relative to this matcher.
++    pub fn matched_path_or_any_parents<P: AsRef<Path>>(
++        &self,
++        path: P,
++        is_dir: bool,
++    ) -> Match<&Glob> {
++        if self.is_empty() {
++            return Match::None;
++        }
++        let mut path = self.strip(path.as_ref());
++        debug_assert!(
++            !path.has_root(),
++            "path is expected to be under the root"
++        );
++        match self.matched_stripped(path, is_dir) {
++            Match::None => (), // walk up
++            a_match => return a_match,
++        }
++        while let Some(parent) = path.parent() {
++            match self.matched_stripped(parent, /* is_dir */ true) {
++                Match::None => path = parent, // walk up
++                a_match => return a_match,
++            }
++        }
++        Match::None
++    }
++
++    /// Like matched, but takes a path that has already been stripped.
++    fn matched_stripped<P: AsRef<Path>>(
++        &self,
++        path: P,
++        is_dir: bool,
++    ) -> Match<&Glob> {
++        if self.is_empty() {
++            return Match::None;
++        }
++        let path = path.as_ref();
++        let _matches = self.matches.as_ref().unwrap().get_default();
++        let mut matches = _matches.borrow_mut();
++        let candidate = Candidate::new(path);
++        self.set.matches_candidate_into(&candidate, &mut *matches);
++        for &i in matches.iter().rev() {
++            let glob = &self.globs[i];
++            if !glob.is_only_dir() || is_dir {
++                return if glob.is_whitelist() {
++                    Match::Whitelist(glob)
++                } else {
++                    Match::Ignore(glob)
++                };
++            }
++        }
++        Match::None
++    }
++
++    /// Strips the given path such that it's suitable for matching with this
++    /// gitignore matcher.
++    fn strip<'a, P: 'a + AsRef<Path> + ?Sized>(
++        &'a self,
++        path: &'a P,
++    ) -> &'a Path {
++        let mut path = path.as_ref();
++        // A leading ./ is completely superfluous. We also strip it from
++        // our gitignore root path, so we need to strip it from our candidate
++        // path too.
++        if let Some(p) = strip_prefix("./", path) {
++            path = p;
++        }
++        // Strip any common prefix between the candidate path and the root
++        // of the gitignore, to make sure we get relative matching right.
++        // BUT, a file name might not have any directory components to it,
++        // in which case, we don't want to accidentally strip any part of the
++        // file name.
++        //
++        // As an additional special case, if the root is just `.`, then we
++        // shouldn't try to strip anything, e.g., when path begins with a `.`.
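++        //
++        // For example: with root `/repo` and candidate path
++        // `/repo/src/main.rs` (illustrative), matching proceeds against
++        // `src/main.rs`.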
++ if self.root != Path::new(".") && !is_file_name(path) { ++ if let Some(p) = strip_prefix(&self.root, path) { ++ path = p; ++ // If we're left with a leading slash, get rid of it. ++ if let Some(p) = strip_prefix("/", path) { ++ path = p; ++ } ++ } ++ } ++ path ++ } ++} ++ ++/// Builds a matcher for a single set of globs from a .gitignore file. ++#[derive(Clone, Debug)] ++pub struct GitignoreBuilder { ++ builder: GlobSetBuilder, ++ root: PathBuf, ++ globs: Vec, ++ case_insensitive: bool, ++} ++ ++impl GitignoreBuilder { ++ /// Create a new builder for a gitignore file. ++ /// ++ /// The path given should be the path at which the globs for this gitignore ++ /// file should be matched. Note that paths are always matched relative ++ /// to the root path given here. Generally, the root path should correspond ++ /// to the *directory* containing a `.gitignore` file. ++ pub fn new>(root: P) -> GitignoreBuilder { ++ let root = root.as_ref(); ++ GitignoreBuilder { ++ builder: GlobSetBuilder::new(), ++ root: strip_prefix("./", root).unwrap_or(root).to_path_buf(), ++ globs: vec![], ++ case_insensitive: false, ++ } ++ } ++ ++ /// Builds a new matcher from the globs added so far. ++ /// ++ /// Once a matcher is built, no new globs can be added to it. ++ pub fn build(&self) -> Result { ++ let nignore = self.globs.iter().filter(|g| !g.is_whitelist()).count(); ++ let nwhite = self.globs.iter().filter(|g| g.is_whitelist()).count(); ++ let set = ++ self.builder.build().map_err(|err| { ++ Error::Glob { ++ glob: None, ++ err: err.to_string(), ++ } ++ })?; ++ Ok(Gitignore { ++ set: set, ++ root: self.root.clone(), ++ globs: self.globs.clone(), ++ num_ignores: nignore as u64, ++ num_whitelists: nwhite as u64, ++ matches: Some(Arc::new(ThreadLocal::default())), ++ }) ++ } ++ ++ /// Add each glob from the file path given. ++ /// ++ /// The file given should be formatted as a `gitignore` file. ++ /// ++ /// Note that partial errors can be returned. For example, if there was ++ /// a problem adding one glob, an error for that will be returned, but ++ /// all other valid globs will still be added. ++ pub fn add>(&mut self, path: P) -> Option { ++ let path = path.as_ref(); ++ let file = match File::open(path) { ++ Err(err) => return Some(Error::Io(err).with_path(path)), ++ Ok(file) => file, ++ }; ++ let rdr = io::BufReader::new(file); ++ let mut errs = PartialErrorBuilder::default(); ++ for (i, line) in rdr.lines().enumerate() { ++ let lineno = (i + 1) as u64; ++ let line = match line { ++ Ok(line) => line, ++ Err(err) => { ++ errs.push(Error::Io(err).tagged(path, lineno)); ++ break; ++ } ++ }; ++ if let Err(err) = self.add_line(Some(path.to_path_buf()), &line) { ++ errs.push(err.tagged(path, lineno)); ++ } ++ } ++ errs.into_error_option() ++ } ++ ++ /// Add each glob line from the string given. ++ /// ++ /// If this string came from a particular `gitignore` file, then its path ++ /// should be provided here. ++ /// ++ /// The string given should be formatted as a `gitignore` file. ++ #[cfg(test)] ++ fn add_str( ++ &mut self, ++ from: Option, ++ gitignore: &str, ++ ) -> Result<&mut GitignoreBuilder, Error> { ++ for line in gitignore.lines() { ++ self.add_line(from.clone(), line)?; ++ } ++ Ok(self) ++ } ++ ++ /// Add a line from a gitignore file to this builder. ++ /// ++ /// If this line came from a particular `gitignore` file, then its path ++ /// should be provided here. ++ /// ++ /// If the line could not be parsed as a glob, then an error is returned. 
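++    ///
++    /// A minimal sketch of the line syntax handled here (root and globs are
++    /// illustrative):
++    ///
++    /// ```rust,no_run
++    /// use ignore::gitignore::GitignoreBuilder;
++    ///
++    /// let mut builder = GitignoreBuilder::new("/repo");
++    /// builder.add_line(None, "*.log").unwrap();     // ignore glob
++    /// builder.add_line(None, "!keep.log").unwrap(); // whitelist glob
++    /// let gi = builder.build().unwrap();
++    /// assert!(gi.matched("foo.log", false).is_ignore());
++    /// assert!(gi.matched("keep.log", false).is_whitelist());
++    /// ```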
++ pub fn add_line( ++ &mut self, ++ from: Option, ++ mut line: &str, ++ ) -> Result<&mut GitignoreBuilder, Error> { ++ if line.starts_with("#") { ++ return Ok(self); ++ } ++ if !line.ends_with("\\ ") { ++ line = line.trim_right(); ++ } ++ if line.is_empty() { ++ return Ok(self); ++ } ++ let mut glob = Glob { ++ from: from, ++ original: line.to_string(), ++ actual: String::new(), ++ is_whitelist: false, ++ is_only_dir: false, ++ }; ++ let mut literal_separator = false; ++ let mut is_absolute = false; ++ if line.starts_with("\\!") || line.starts_with("\\#") { ++ line = &line[1..]; ++ is_absolute = line.chars().nth(0) == Some('/'); ++ } else { ++ if line.starts_with("!") { ++ glob.is_whitelist = true; ++ line = &line[1..]; ++ } ++ if line.starts_with("/") { ++ // `man gitignore` says that if a glob starts with a slash, ++ // then the glob can only match the beginning of a path ++ // (relative to the location of gitignore). We achieve this by ++ // simply banning wildcards from matching /. ++ literal_separator = true; ++ line = &line[1..]; ++ is_absolute = true; ++ } ++ } ++ // If it ends with a slash, then this should only match directories, ++ // but the slash should otherwise not be used while globbing. ++ if let Some((i, c)) = line.char_indices().rev().nth(0) { ++ if c == '/' { ++ glob.is_only_dir = true; ++ line = &line[..i]; ++ } ++ } ++ // If there is a literal slash, then we note that so that globbing ++ // doesn't let wildcards match slashes. ++ glob.actual = line.to_string(); ++ if is_absolute || line.chars().any(|c| c == '/') { ++ literal_separator = true; ++ } ++ // If there was a slash, then this is a glob that must match the entire ++ // path name. Otherwise, we should let it match anywhere, so use a **/ ++ // prefix. ++ if !literal_separator { ++ // ... but only if we don't already have a **/ prefix. ++ if !glob.has_doublestar_prefix() { ++ glob.actual = format!("**/{}", glob.actual); ++ } ++ } ++ // If the glob ends with `/**`, then we should only match everything ++ // inside a directory, but not the directory itself. Standard globs ++ // will match the directory. So we add `/*` to force the issue. ++ if glob.actual.ends_with("/**") { ++ glob.actual = format!("{}/*", glob.actual); ++ } ++ let parsed = ++ GlobBuilder::new(&glob.actual) ++ .literal_separator(literal_separator) ++ .case_insensitive(self.case_insensitive) ++ .backslash_escape(true) ++ .build() ++ .map_err(|err| { ++ Error::Glob { ++ glob: Some(glob.original.clone()), ++ err: err.kind().to_string(), ++ } ++ })?; ++ self.builder.add(parsed); ++ self.globs.push(glob); ++ Ok(self) ++ } ++ ++ /// Toggle whether the globs should be matched case insensitively or not. ++ /// ++ /// When this option is changed, only globs added after the change will be affected. ++ /// ++ /// This is disabled by default. ++ pub fn case_insensitive( ++ &mut self, yes: bool ++ ) -> Result<&mut GitignoreBuilder, Error> { ++ self.case_insensitive = yes; ++ Ok(self) ++ } ++} ++ ++/// Return the file path of the current environment's global gitignore file. ++/// ++/// Note that the file path returned may not exist. ++fn gitconfig_excludes_path() -> Option { ++ // git supports $HOME/.gitconfig and $XDG_CONFIG_DIR/git/config. Notably, ++ // both can be active at the same time, where $HOME/.gitconfig takes ++ // precedent. So if $HOME/.gitconfig defines a `core.excludesFile`, then ++ // we're done. 
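++    //
++    // The entry being extracted looks like this (illustrative):
++    //
++    //     [core]
++    //     excludesFile = ~/.gitignore_global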
++    match gitconfig_home_contents().and_then(|x| parse_excludes_file(&x)) {
++        Some(path) => return Some(path),
++        None => {}
++    }
++    match gitconfig_xdg_contents().and_then(|x| parse_excludes_file(&x)) {
++        Some(path) => return Some(path),
++        None => {}
++    }
++    excludes_file_default()
++}
++
++/// Returns the file contents of git's global config file, if one exists, in
++/// the user's home directory.
++fn gitconfig_home_contents() -> Option<Vec<u8>> {
++    let home = match home_dir() {
++        None => return None,
++        Some(home) => home,
++    };
++    let mut file = match File::open(home.join(".gitconfig")) {
++        Err(_) => return None,
++        Ok(file) => io::BufReader::new(file),
++    };
++    let mut contents = vec![];
++    file.read_to_end(&mut contents).ok().map(|_| contents)
++}
++
++/// Returns the file contents of git's global config file, if one exists, in
++/// the user's XDG_CONFIG_DIR directory.
++fn gitconfig_xdg_contents() -> Option<Vec<u8>> {
++    let path = env::var_os("XDG_CONFIG_HOME")
++        .and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
++        .or_else(|| home_dir().map(|p| p.join(".config")))
++        .map(|x| x.join("git/config"));
++    let mut file = match path.and_then(|p| File::open(p).ok()) {
++        None => return None,
++        Some(file) => io::BufReader::new(file),
++    };
++    let mut contents = vec![];
++    file.read_to_end(&mut contents).ok().map(|_| contents)
++}
++
++/// Returns the default file path for a global .gitignore file.
++///
++/// Specifically, this respects XDG_CONFIG_HOME.
++fn excludes_file_default() -> Option<PathBuf> {
++    env::var_os("XDG_CONFIG_HOME")
++        .and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
++        .or_else(|| home_dir().map(|p| p.join(".config")))
++        .map(|x| x.join("git/ignore"))
++}
++
++/// Extract git's `core.excludesfile` config setting from the raw file contents
++/// given.
++fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
++    // N.B. This is the lazy approach, and isn't technically correct, but
++    // probably works in more circumstances. I guess we would ideally have
++    // a full INI parser. Yuck.
++    lazy_static! {
++        static ref RE: Regex = Regex::new(
++            r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$"
++        ).unwrap();
++    };
++    let caps = match RE.captures(data) {
++        None => return None,
++        Some(caps) => caps,
++    };
++    str::from_utf8(&caps[1]).ok().map(|s| PathBuf::from(expand_tilde(s)))
++}
++
++/// Expands ~ in file paths to the value of $HOME.
++fn expand_tilde(path: &str) -> String {
++    let home = match home_dir() {
++        None => return path.to_string(),
++        Some(home) => home.to_string_lossy().into_owned(),
++    };
++    path.replace("~", &home)
++}
++
++/// Returns the location of the user's home directory.
++fn home_dir() -> Option<PathBuf> {
++    // We're fine with using env::home_dir for now. Its bugs are, IMO, pretty
++    // minor corner cases. We should still probably eventually migrate to
++    // the `dirs` crate to get a proper implementation.
++    #![allow(deprecated)]
++    env::home_dir()
++}
++
++#[cfg(test)]
++mod tests {
++    use std::path::Path;
++    use super::{Gitignore, GitignoreBuilder};
++
++    fn gi_from_str<P: AsRef<Path>>(root: P, s: &str) -> Gitignore {
++        let mut builder = GitignoreBuilder::new(root);
++        builder.add_str(None, s).unwrap();
++        builder.build().unwrap()
++    }
++
++    macro_rules! ignored {
++        ($name:ident, $root:expr, $gi:expr, $path:expr) => {
++            ignored!($name, $root, $gi, $path, false);
++        };
++        ($name:ident, $root:expr, $gi:expr, $path:expr, $is_dir:expr) => {
++            #[test]
++            fn $name() {
++                let gi = gi_from_str($root, $gi);
++                assert!(gi.matched($path, $is_dir).is_ignore());
++            }
++        };
++    }
++
++    macro_rules! not_ignored {
++        ($name:ident, $root:expr, $gi:expr, $path:expr) => {
++            not_ignored!($name, $root, $gi, $path, false);
++        };
++        ($name:ident, $root:expr, $gi:expr, $path:expr, $is_dir:expr) => {
++            #[test]
++            fn $name() {
++                let gi = gi_from_str($root, $gi);
++                assert!(!gi.matched($path, $is_dir).is_ignore());
++            }
++        };
++    }
++
++    const ROOT: &'static str = "/home/foobar/rust/rg";
++
++    ignored!(ig1, ROOT, "months", "months");
++    ignored!(ig2, ROOT, "*.lock", "Cargo.lock");
++    ignored!(ig3, ROOT, "*.rs", "src/main.rs");
++    ignored!(ig4, ROOT, "src/*.rs", "src/main.rs");
++    ignored!(ig5, ROOT, "/*.c", "cat-file.c");
++    ignored!(ig6, ROOT, "/src/*.rs", "src/main.rs");
++    ignored!(ig7, ROOT, "!src/main.rs\n*.rs", "src/main.rs");
++    ignored!(ig8, ROOT, "foo/", "foo", true);
++    ignored!(ig9, ROOT, "**/foo", "foo");
++    ignored!(ig10, ROOT, "**/foo", "src/foo");
++    ignored!(ig11, ROOT, "**/foo/**", "src/foo/bar");
++    ignored!(ig12, ROOT, "**/foo/**", "wat/src/foo/bar/baz");
++    ignored!(ig13, ROOT, "**/foo/bar", "foo/bar");
++    ignored!(ig14, ROOT, "**/foo/bar", "src/foo/bar");
++    ignored!(ig15, ROOT, "abc/**", "abc/x");
++    ignored!(ig16, ROOT, "abc/**", "abc/x/y");
++    ignored!(ig17, ROOT, "abc/**", "abc/x/y/z");
++    ignored!(ig18, ROOT, "a/**/b", "a/b");
++    ignored!(ig19, ROOT, "a/**/b", "a/x/b");
++    ignored!(ig20, ROOT, "a/**/b", "a/x/y/b");
++    ignored!(ig21, ROOT, r"\!xy", "!xy");
++    ignored!(ig22, ROOT, r"\#foo", "#foo");
++    ignored!(ig23, ROOT, "foo", "./foo");
++    ignored!(ig24, ROOT, "target", "grep/target");
++    ignored!(ig25, ROOT, "Cargo.lock", "./tabwriter-bin/Cargo.lock");
++    ignored!(ig26, ROOT, "/foo/bar/baz", "./foo/bar/baz");
++    ignored!(ig27, ROOT, "foo/", "xyz/foo", true);
++    ignored!(ig28, "./src", "/llvm/", "./src/llvm", true);
++    ignored!(ig29, ROOT, "node_modules/ ", "node_modules", true);
++    ignored!(ig30, ROOT, "**/", "foo/bar", true);
++    ignored!(ig31, ROOT, "path1/*", "path1/foo");
++    ignored!(ig32, ROOT, ".a/b", ".a/b");
++    ignored!(ig33, "./", ".a/b", ".a/b");
++    ignored!(ig34, ".", ".a/b", ".a/b");
++    ignored!(ig35, "./.", ".a/b", ".a/b");
++    ignored!(ig36, "././", ".a/b", ".a/b");
++    ignored!(ig37, "././.", ".a/b", ".a/b");
++    ignored!(ig38, ROOT, "\\[", "[");
++    ignored!(ig39, ROOT, "\\?", "?");
++    ignored!(ig40, ROOT, "\\*", "*");
++    ignored!(ig41, ROOT, "\\a", "a");
++
++    not_ignored!(ignot1, ROOT, "amonths", "months");
++    not_ignored!(ignot2, ROOT, "monthsa", "months");
++    not_ignored!(ignot3, ROOT, "/src/*.rs", "src/grep/src/main.rs");
++    not_ignored!(ignot4, ROOT, "/*.c", "mozilla-sha1/sha1.c");
++    not_ignored!(ignot5, ROOT, "/src/*.rs", "src/grep/src/main.rs");
++    not_ignored!(ignot6, ROOT, "*.rs\n!src/main.rs", "src/main.rs");
++    not_ignored!(ignot7, ROOT, "foo/", "foo", false);
++    not_ignored!(ignot8, ROOT, "**/foo/**", "wat/src/afoo/bar/baz");
++    not_ignored!(ignot9, ROOT, "**/foo/**", "wat/src/fooa/bar/baz");
++    not_ignored!(ignot10, ROOT, "**/foo/bar", "foo/src/bar");
++    not_ignored!(ignot11, ROOT, "#foo", "#foo");
++    not_ignored!(ignot12, ROOT, "\n\n\n", "foo");
++    not_ignored!(ignot13, ROOT, "foo/**", "foo", true);
++    not_ignored!(
++        ignot14, "./third_party/protobuf", "m4/ltoptions.m4",
++        "./third_party/protobuf/csharp/src/packages/repositories.config");
++    not_ignored!(ignot15, ROOT, "!/bar", "foo/bar");
++    not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true);
++    not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs");
++    not_ignored!(ignot18, ROOT, "path1/*", "path2/path1/foo");
++
++    fn bytes(s: &str) -> Vec<u8> {
++        s.to_string().into_bytes()
++    }
++
++    fn path_string<P: AsRef<Path>>(path: P) -> String {
++        path.as_ref().to_str().unwrap().to_string()
++    }
++
++    #[test]
++    fn parse_excludes_file1() {
++        let data = bytes("[core]\nexcludesFile = /foo/bar");
++        let got = super::parse_excludes_file(&data).unwrap();
++        assert_eq!(path_string(got), "/foo/bar");
++    }
++
++    #[test]
++    fn parse_excludes_file2() {
++        let data = bytes("[core]\nexcludesFile = ~/foo/bar");
++        let got = super::parse_excludes_file(&data).unwrap();
++        assert_eq!(path_string(got), super::expand_tilde("~/foo/bar"));
++    }
++
++    #[test]
++    fn parse_excludes_file3() {
++        let data = bytes("[core]\nexcludeFile = /foo/bar");
++        assert!(super::parse_excludes_file(&data).is_none());
++    }
++
++    // See: https://github.com/BurntSushi/ripgrep/issues/106
++    #[test]
++    fn regression_106() {
++        gi_from_str("/", " ");
++    }
++
++    #[test]
++    fn case_insensitive() {
++        let gi = GitignoreBuilder::new(ROOT)
++            .case_insensitive(true).unwrap()
++            .add_str(None, "*.html").unwrap()
++            .build().unwrap();
++        assert!(gi.matched("foo.html", false).is_ignore());
++        assert!(gi.matched("foo.HTML", false).is_ignore());
++        assert!(!gi.matched("foo.htm", false).is_ignore());
++        assert!(!gi.matched("foo.HTM", false).is_ignore());
++    }
++
++    ignored!(cs1, ROOT, "*.html", "foo.html");
++    not_ignored!(cs2, ROOT, "*.html", "foo.HTML");
++    not_ignored!(cs3, ROOT, "*.html", "foo.htm");
++    not_ignored!(cs4, ROOT, "*.html", "foo.HTM");
++}
diff --cc vendor/ignore-0.4.3/src/lib.rs
index 000000000,000000000..b97e267a6
new file mode 100644
--- /dev/null
+++ b/vendor/ignore-0.4.3/src/lib.rs
@@@ -1,0 -1,0 +1,444 @@@
++/*!
++The ignore crate provides a fast recursive directory iterator that respects
++various filters such as globs, file types and `.gitignore` files. The precise
++matching rules and precedence are explained in the documentation for
++`WalkBuilder`.
++
++Secondarily, this crate exposes gitignore and file type matchers for use cases
++that demand more fine-grained control.
++
++# Example
++
++This example shows the most basic usage of this crate. This code will
++recursively traverse the current directory while automatically filtering out
++files and directories according to ignore globs found in files like
++`.ignore` and `.gitignore`:
++
++
++```rust,no_run
++use ignore::Walk;
++
++for result in Walk::new("./") {
++    // Each item yielded by the iterator is either a directory entry or an
++    // error, so either print the path or the error.
++    match result {
++        Ok(entry) => println!("{}", entry.path().display()),
++        Err(err) => println!("ERROR: {}", err),
++    }
++}
++```
++
++# Example: advanced
++
++By default, the recursive directory iterator will ignore hidden files and
++directories. This can be disabled by building the iterator with `WalkBuilder`:
++
++```rust,no_run
++use ignore::WalkBuilder;
++
++for result in WalkBuilder::new("./").hidden(false).build() {
++    println!("{:?}", result);
++}
++```
++
++See the documentation for `WalkBuilder` for many other options.
++*/
++
++#![deny(missing_docs)]
++
++extern crate crossbeam;
++extern crate globset;
++#[macro_use]
++extern crate lazy_static;
++#[macro_use]
++extern crate log;
++extern crate memchr;
++extern crate regex;
++extern crate same_file;
++#[cfg(test)]
++extern crate tempdir;
++extern crate thread_local;
++extern crate walkdir;
++#[cfg(windows)]
++extern crate winapi;
++
++use std::error;
++use std::fmt;
++use std::io;
++use std::path::{Path, PathBuf};
++
++pub use walk::{DirEntry, Walk, WalkBuilder, WalkParallel, WalkState};
++
++mod dir;
++pub mod gitignore;
++mod pathutil;
++pub mod overrides;
++pub mod types;
++mod walk;
++
++/// Represents an error that can occur when parsing a gitignore file.
++#[derive(Debug)]
++pub enum Error {
++    /// A collection of "soft" errors. These occur when adding an ignore
++    /// file partially succeeded.
++    Partial(Vec<Error>),
++    /// An error associated with a specific line number.
++    WithLineNumber {
++        /// The line number.
++        line: u64,
++        /// The underlying error.
++        err: Box<Error>,
++    },
++    /// An error associated with a particular file path.
++    WithPath {
++        /// The file path.
++        path: PathBuf,
++        /// The underlying error.
++        err: Box<Error>,
++    },
++    /// An error associated with a particular directory depth when recursively
++    /// walking a directory.
++    WithDepth {
++        /// The directory depth.
++        depth: usize,
++        /// The underlying error.
++        err: Box<Error>,
++    },
++    /// An error that occurs when a file loop is detected when traversing
++    /// symbolic links.
++    Loop {
++        /// The ancestor file path in the loop.
++        ancestor: PathBuf,
++        /// The child file path in the loop.
++        child: PathBuf,
++    },
++    /// An error that occurs when doing I/O, such as reading an ignore file.
++    Io(io::Error),
++    /// An error that occurs when trying to parse a glob.
++    Glob {
++        /// The original glob that caused this error. This glob, when
++        /// available, always corresponds to the glob provided by an end user.
++        /// e.g., It is the glob as written in a `.gitignore` file.
++        ///
++        /// (This glob may be distinct from the glob that is actually
++        /// compiled, after accounting for `gitignore` semantics.)
++        glob: Option<String>,
++        /// The underlying glob error as a string.
++        err: String,
++    },
++    /// A type selection for a file type that is not defined.
++    UnrecognizedFileType(String),
++    /// A user specified file type definition could not be parsed.
++    InvalidDefinition,
++}
++
++impl Clone for Error {
++    fn clone(&self) -> Error {
++        match *self {
++            Error::Partial(ref errs) => Error::Partial(errs.clone()),
++            Error::WithLineNumber { line, ref err } => {
++                Error::WithLineNumber { line: line, err: err.clone() }
++            }
++            Error::WithPath { ref path, ref err } => {
++                Error::WithPath { path: path.clone(), err: err.clone() }
++            }
++            Error::WithDepth { depth, ref err } => {
++                Error::WithDepth { depth: depth, err: err.clone() }
++            }
++            Error::Loop { ref ancestor, ref child } => {
++                Error::Loop {
++                    ancestor: ancestor.clone(),
++                    child: child.clone()
++                }
++            }
++            Error::Io(ref err) => {
++                match err.raw_os_error() {
++                    Some(e) => Error::Io(io::Error::from_raw_os_error(e)),
++                    None => {
++                        Error::Io(io::Error::new(err.kind(), err.to_string()))
++                    }
++                }
++            }
++            Error::Glob { ref glob, ref err } => {
++                Error::Glob { glob: glob.clone(), err: err.clone() }
++            }
++            Error::UnrecognizedFileType(ref err) => {
++                Error::UnrecognizedFileType(err.clone())
++            }
++            Error::InvalidDefinition => Error::InvalidDefinition,
++        }
++    }
++}
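Because several variants wrap another boxed `Error`, callers typically peel the wrappers recursively. A minimal sketch of that pattern, using only the variants defined above (the `describe` helper is hypothetical, not part of the crate):

```rust
use ignore::Error;

// Recursively render the tag wrappers; everything else falls back to Display.
fn describe(err: &Error) -> String {
    match *err {
        Error::WithLineNumber { line, ref err } => {
            format!("line {}: {}", line, describe(err))
        }
        Error::WithPath { ref path, ref err } => {
            format!("{}: {}", path.display(), describe(err))
        }
        Error::Io(ref err) => format!("I/O error: {}", err),
        ref other => other.to_string(),
    }
}

fn main() {
    let err = Error::WithLineNumber {
        line: 2,
        err: Box::new(Error::InvalidDefinition),
    };
    println!("{}", describe(&err));
}
```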
++
++impl Error {
++    /// Returns true if this is a partial error.
++    ///
++    /// A partial error occurs when only some operations failed while others
++    /// may have succeeded. For example, an ignore file may contain an invalid
++    /// glob among otherwise valid globs.
++    pub fn is_partial(&self) -> bool {
++        match *self {
++            Error::Partial(_) => true,
++            Error::WithLineNumber { ref err, .. } => err.is_partial(),
++            Error::WithPath { ref err, .. } => err.is_partial(),
++            Error::WithDepth { ref err, .. } => err.is_partial(),
++            _ => false,
++        }
++    }
++
++    /// Returns true if this error is exclusively an I/O error.
++    pub fn is_io(&self) -> bool {
++        match *self {
++            Error::Partial(ref errs) => errs.len() == 1 && errs[0].is_io(),
++            Error::WithLineNumber { ref err, .. } => err.is_io(),
++            Error::WithPath { ref err, .. } => err.is_io(),
++            Error::WithDepth { ref err, .. } => err.is_io(),
++            Error::Loop { .. } => false,
++            Error::Io(_) => true,
++            Error::Glob { .. } => false,
++            Error::UnrecognizedFileType(_) => false,
++            Error::InvalidDefinition => false,
++        }
++    }
++
++    /// Returns a depth associated with recursively walking a directory (if
++    /// this error was generated from a recursive directory iterator).
++    pub fn depth(&self) -> Option<usize> {
++        match *self {
++            Error::WithPath { ref err, .. } => err.depth(),
++            Error::WithDepth { depth, .. } => Some(depth),
++            _ => None,
++        }
++    }
++
++    /// Turn an error into a tagged error with the given file path.
++    fn with_path<P: AsRef<Path>>(self, path: P) -> Error {
++        Error::WithPath {
++            path: path.as_ref().to_path_buf(),
++            err: Box::new(self),
++        }
++    }
++
++    /// Turn an error into a tagged error with the given depth.
++    fn with_depth(self, depth: usize) -> Error {
++        Error::WithDepth {
++            depth: depth,
++            err: Box::new(self),
++        }
++    }
++
++    /// Turn an error into a tagged error with the given file path and line
++    /// number. If path is empty, then it is omitted from the error.
++    fn tagged<P: AsRef<Path>>(self, path: P, lineno: u64) -> Error {
++        let errline = Error::WithLineNumber {
++            line: lineno,
++            err: Box::new(self),
++        };
++        if path.as_ref().as_os_str().is_empty() {
++            return errline;
++        }
++        errline.with_path(path)
++    }
++
++    /// Build an error from a walkdir error.
++    fn from_walkdir(err: walkdir::Error) -> Error {
++        let depth = err.depth();
++        if let (Some(anc), Some(child)) = (err.loop_ancestor(), err.path()) {
++            return Error::WithDepth {
++                depth: depth,
++                err: Box::new(Error::Loop {
++                    ancestor: anc.to_path_buf(),
++                    child: child.to_path_buf(),
++                }),
++            };
++        }
++        let path = err.path().map(|p| p.to_path_buf());
++        let mut ig_err = Error::Io(io::Error::from(err));
++        if let Some(path) = path {
++            ig_err = Error::WithPath {
++                path: path,
++                err: Box::new(ig_err),
++            };
++        }
++        ig_err
++    }
++}
++
++impl error::Error for Error {
++    fn description(&self) -> &str {
++        match *self {
++            Error::Partial(_) => "partial error",
++            Error::WithLineNumber { ref err, .. } => err.description(),
++            Error::WithPath { ref err, .. } => err.description(),
++            Error::WithDepth { ref err, .. } => err.description(),
++            Error::Loop { .. } => "file system loop found",
++            Error::Io(ref err) => err.description(),
++            Error::Glob { ref err, .. } => err,
++            Error::UnrecognizedFileType(_) => "unrecognized file type",
++            Error::InvalidDefinition => "invalid definition",
++        }
++    }
++}
++
++impl fmt::Display for Error {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        match *self {
++            Error::Partial(ref errs) => {
++                let msgs: Vec<String> =
++                    errs.iter().map(|err| err.to_string()).collect();
++                write!(f, "{}", msgs.join("\n"))
++            }
++            Error::WithLineNumber { line, ref err } => {
++                write!(f, "line {}: {}", line, err)
++            }
++            Error::WithPath { ref path, ref err } => {
++                write!(f, "{}: {}", path.display(), err)
++            }
++            Error::WithDepth { ref err, .. } => err.fmt(f),
++            Error::Loop { ref ancestor, ref child } => {
++                write!(f, "File system loop found: \
++                           {} points to an ancestor {}",
++                          child.display(), ancestor.display())
++            }
++            Error::Io(ref err) => err.fmt(f),
++            Error::Glob { glob: None, ref err } => write!(f, "{}", err),
++            Error::Glob { glob: Some(ref glob), ref err } => {
++                write!(f, "error parsing glob '{}': {}", glob, err)
++            }
++            Error::UnrecognizedFileType(ref ty) => {
++                write!(f, "unrecognized file type: {}", ty)
++            }
++            Error::InvalidDefinition => {
++                write!(f, "invalid definition (format is type:glob, e.g., \
++                           html:*.html)")
++            }
++        }
++    }
++}
++
++impl From<io::Error> for Error {
++    fn from(err: io::Error) -> Error {
++        Error::Io(err)
++    }
++}
++
++#[derive(Debug, Default)]
++struct PartialErrorBuilder(Vec<Error>);
++
++impl PartialErrorBuilder {
++    fn push(&mut self, err: Error) {
++        self.0.push(err);
++    }
++
++    fn push_ignore_io(&mut self, err: Error) {
++        if !err.is_io() {
++            self.push(err);
++        }
++    }
++
++    fn maybe_push(&mut self, err: Option<Error>) {
++        if let Some(err) = err {
++            self.push(err);
++        }
++    }
++
++    fn maybe_push_ignore_io(&mut self, err: Option<Error>) {
++        if let Some(err) = err {
++            self.push_ignore_io(err);
++        }
++    }
++
++    fn into_error_option(mut self) -> Option<Error> {
++        if self.0.is_empty() {
++            None
++        } else if self.0.len() == 1 {
++            Some(self.0.pop().unwrap())
++        } else {
++            Some(Error::Partial(self.0))
++        }
++    }
++}
++
++/// The result of a glob match.
++///
++/// The type parameter `T` typically refers to a type that provides more
++/// information about a particular match. For example, it might identify
++/// the specific gitignore file and the specific glob pattern that caused
++/// the match.
++#[derive(Clone, Debug)]
++pub enum Match<T> {
++    /// The path didn't match any glob.
++    None,
++    /// The highest precedent glob matched indicates the path should be
++    /// ignored.
++    Ignore(T),
++    /// The highest precedent glob matched indicates the path should be
++    /// whitelisted.
++    Whitelist(T),
++}
++
++impl<T> Match<T> {
++    /// Returns true if the match result didn't match any globs.
++    pub fn is_none(&self) -> bool {
++        match *self {
++            Match::None => true,
++            Match::Ignore(_) | Match::Whitelist(_) => false,
++        }
++    }
++
++    /// Returns true if the match result implies the path should be ignored.
++    pub fn is_ignore(&self) -> bool {
++        match *self {
++            Match::Ignore(_) => true,
++            Match::None | Match::Whitelist(_) => false,
++        }
++    }
++
++    /// Returns true if the match result implies the path should be
++    /// whitelisted.
++    pub fn is_whitelist(&self) -> bool {
++        match *self {
++            Match::Whitelist(_) => true,
++            Match::None | Match::Ignore(_) => false,
++        }
++    }
++
++    /// Inverts the match so that `Ignore` becomes `Whitelist` and
++    /// `Whitelist` becomes `Ignore`. A non-match remains the same.
++    pub fn invert(self) -> Match<T> {
++        match self {
++            Match::None => Match::None,
++            Match::Ignore(t) => Match::Whitelist(t),
++            Match::Whitelist(t) => Match::Ignore(t),
++        }
++    }
++
++    /// Return the value inside this match if it exists.
++    pub fn inner(&self) -> Option<&T> {
++        match *self {
++            Match::None => None,
++            Match::Ignore(ref t) => Some(t),
++            Match::Whitelist(ref t) => Some(t),
++        }
++    }
++
++    /// Apply the given function to the value inside this match.
++    ///
++    /// If the match has no value, then return the match unchanged.
++    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Match<U> {
++        match self {
++            Match::None => Match::None,
++            Match::Ignore(t) => Match::Ignore(f(t)),
++            Match::Whitelist(t) => Match::Whitelist(f(t)),
++        }
++    }
++
++    /// Return the match if it is not none. Otherwise, return other.
++    pub fn or(self, other: Self) -> Self {
++        if self.is_none() {
++            other
++        } else {
++            self
++        }
++    }
++}
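The combinators above are how the crate layers matchers on top of each other (the override matcher, for instance, is `invert` over a gitignore match). A small self-contained illustration, with values invented for the example:

```rust
use ignore::Match;

fn main() {
    let first: Match<&str> = Match::None;
    let second: Match<&str> = Match::Ignore("*.log");

    // `or` keeps the first result unless it was `Match::None`.
    let effective = first.or(second);
    assert!(effective.is_ignore());

    // `invert` swaps ignore and whitelist, as the override matcher does.
    assert!(effective.invert().is_whitelist());
}
```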
diff --cc vendor/ignore-0.4.3/src/overrides.rs
index 000000000,000000000..c63532af8
new file mode 100644
--- /dev/null
+++ b/vendor/ignore-0.4.3/src/overrides.rs
@@@ -1,0 -1,0 +1,259 @@@
++/*!
++The overrides module provides a way to specify a set of override globs.
++This provides functionality similar to `--include` or `--exclude` in command
++line tools.
++*/
++
++use std::path::Path;
++
++use gitignore::{self, Gitignore, GitignoreBuilder};
++use {Error, Match};
++
++/// Glob represents a single glob in an override matcher.
++///
++/// This is used to report information about the highest precedent glob
++/// that matched.
++///
++/// Note that not all matches necessarily correspond to a specific glob. For
++/// example, if there are one or more whitelist globs and a file path doesn't
++/// match any glob in the set, then the file path is considered to be ignored.
++///
++/// The lifetime `'a` refers to the lifetime of the matcher that produced
++/// this glob.
++#[derive(Clone, Debug)]
++pub struct Glob<'a>(GlobInner<'a>);
++
++#[derive(Clone, Debug)]
++enum GlobInner<'a> {
++    /// No glob matched, but the file path should still be ignored.
++    UnmatchedIgnore,
++    /// A glob matched.
++    Matched(&'a gitignore::Glob),
++}
++
++impl<'a> Glob<'a> {
++    fn unmatched() -> Glob<'a> {
++        Glob(GlobInner::UnmatchedIgnore)
++    }
++}
++
++/// Manages a set of overrides provided explicitly by the end user.
++#[derive(Clone, Debug)]
++pub struct Override(Gitignore);
++
++impl Override {
++    /// Returns an empty matcher that never matches any file path.
++    pub fn empty() -> Override {
++        Override(Gitignore::empty())
++    }
++
++    /// Returns the directory of this override set.
++    ///
++    /// All matches are done relative to this path.
++    pub fn path(&self) -> &Path {
++        self.0.path()
++    }
++
++    /// Returns true if and only if this matcher is empty.
++    ///
++    /// When a matcher is empty, it will never match any file path.
++    pub fn is_empty(&self) -> bool {
++        self.0.is_empty()
++    }
++
++    /// Returns the total number of ignore globs.
++    pub fn num_ignores(&self) -> u64 {
++        self.0.num_whitelists()
++    }
++
++    /// Returns the total number of whitelisted globs.
++    pub fn num_whitelists(&self) -> u64 {
++        self.0.num_ignores()
++    }
++
++    /// Returns whether the given file path matched a pattern in this override
++    /// matcher.
++    ///
++    /// `is_dir` should be true if the path refers to a directory and false
++    /// otherwise.
++    ///
++    /// If there are no overrides, then this always returns `Match::None`.
++    ///
++    /// If there is at least one whitelist override and `is_dir` is false, then
++    /// this never returns `Match::None`, since non-matches are interpreted as
++    /// ignored.
++    ///
++    /// The given path is matched to the globs relative to the path given
++    /// when building the override matcher. Specifically, before matching
++    /// `path`, its prefix (as determined by a common suffix of the directory
++    /// given) is stripped. If there is no common suffix/prefix overlap, then
++    /// `path` is assumed to reside in the same directory as the root path for
++    /// this set of overrides.
++    pub fn matched<'a, P: AsRef<Path>>(
++        &'a self,
++        path: P,
++        is_dir: bool,
++    ) -> Match<Glob<'a>> {
++        if self.is_empty() {
++            return Match::None;
++        }
++        let mat = self.0.matched(path, is_dir).invert();
++        if mat.is_none() && self.num_whitelists() > 0 && !is_dir {
++            return Match::Ignore(Glob::unmatched());
++        }
++        mat.map(move |giglob| Glob(GlobInner::Matched(giglob)))
++    }
++}
++
++/// Builds a matcher for a set of glob overrides.
++pub struct OverrideBuilder {
++    builder: GitignoreBuilder,
++}
++
++impl OverrideBuilder {
++    /// Create a new override builder.
++    ///
++    /// Matching is done relative to the directory path provided.
++    pub fn new<P: AsRef<Path>>(path: P) -> OverrideBuilder {
++        OverrideBuilder {
++            builder: GitignoreBuilder::new(path),
++        }
++    }
++
++    /// Builds a new override matcher from the globs added so far.
++    ///
++    /// Once a matcher is built, no new globs can be added to it.
++    pub fn build(&self) -> Result<Override, Error> {
++        Ok(Override(self.builder.build()?))
++    }
++
++    /// Add a glob to the set of overrides.
++    ///
++    /// Globs provided here have precisely the same semantics as a single
++    /// line in a `gitignore` file, where the meaning of `!` is inverted:
++    /// namely, `!` at the beginning of a glob will ignore a file. Without `!`,
++    /// all matches of the glob provided are treated as whitelist matches.
++    pub fn add(&mut self, glob: &str) -> Result<&mut OverrideBuilder, Error> {
++        self.builder.add_line(None, glob)?;
++        Ok(self)
++    }
++
++    /// Toggle whether the globs should be matched case insensitively or not.
++    ///
++    /// When this option is changed, only globs added after the change will be affected.
++    ///
++    /// This is disabled by default.
++ pub fn case_insensitive( ++ &mut self, yes: bool ++ ) -> Result<&mut OverrideBuilder, Error> { ++ self.builder.case_insensitive(yes)?; ++ Ok(self) ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::{Override, OverrideBuilder}; ++ ++ const ROOT: &'static str = "/home/andrew/foo"; ++ ++ fn ov(globs: &[&str]) -> Override { ++ let mut builder = OverrideBuilder::new(ROOT); ++ for glob in globs { ++ builder.add(glob).unwrap(); ++ } ++ builder.build().unwrap() ++ } ++ ++ #[test] ++ fn empty() { ++ let ov = ov(&[]); ++ assert!(ov.matched("a.foo", false).is_none()); ++ assert!(ov.matched("a", false).is_none()); ++ assert!(ov.matched("", false).is_none()); ++ } ++ ++ #[test] ++ fn simple() { ++ let ov = ov(&["*.foo", "!*.bar"]); ++ assert!(ov.matched("a.foo", false).is_whitelist()); ++ assert!(ov.matched("a.foo", true).is_whitelist()); ++ assert!(ov.matched("a.rs", false).is_ignore()); ++ assert!(ov.matched("a.rs", true).is_none()); ++ assert!(ov.matched("a.bar", false).is_ignore()); ++ assert!(ov.matched("a.bar", true).is_ignore()); ++ } ++ ++ #[test] ++ fn only_ignores() { ++ let ov = ov(&["!*.bar"]); ++ assert!(ov.matched("a.rs", false).is_none()); ++ assert!(ov.matched("a.rs", true).is_none()); ++ assert!(ov.matched("a.bar", false).is_ignore()); ++ assert!(ov.matched("a.bar", true).is_ignore()); ++ } ++ ++ #[test] ++ fn precedence() { ++ let ov = ov(&["*.foo", "!*.bar.foo"]); ++ assert!(ov.matched("a.foo", false).is_whitelist()); ++ assert!(ov.matched("a.baz", false).is_ignore()); ++ assert!(ov.matched("a.bar.foo", false).is_ignore()); ++ } ++ ++ #[test] ++ fn gitignore() { ++ let ov = ov(&["/foo", "bar/*.rs", "baz/**"]); ++ assert!(ov.matched("bar/lib.rs", false).is_whitelist()); ++ assert!(ov.matched("bar/wat/lib.rs", false).is_ignore()); ++ assert!(ov.matched("wat/bar/lib.rs", false).is_ignore()); ++ assert!(ov.matched("foo", false).is_whitelist()); ++ assert!(ov.matched("wat/foo", false).is_ignore()); ++ assert!(ov.matched("baz", false).is_ignore()); ++ assert!(ov.matched("baz/a", false).is_whitelist()); ++ assert!(ov.matched("baz/a/b", false).is_whitelist()); ++ } ++ ++ #[test] ++ fn allow_directories() { ++ // This tests that directories are NOT ignored when they are unmatched. 
++        let ov = ov(&["*.rs"]);
++        assert!(ov.matched("foo.rs", false).is_whitelist());
++        assert!(ov.matched("foo.c", false).is_ignore());
++        assert!(ov.matched("foo", false).is_ignore());
++        assert!(ov.matched("foo", true).is_none());
++        assert!(ov.matched("src/foo.rs", false).is_whitelist());
++        assert!(ov.matched("src/foo.c", false).is_ignore());
++        assert!(ov.matched("src/foo", false).is_ignore());
++        assert!(ov.matched("src/foo", true).is_none());
++    }
++
++    #[test]
++    fn absolute_path() {
++        let ov = ov(&["!/bar"]);
++        assert!(ov.matched("./foo/bar", false).is_none());
++    }
++
++    #[test]
++    fn case_insensitive() {
++        let ov = OverrideBuilder::new(ROOT)
++            .case_insensitive(true).unwrap()
++            .add("*.html").unwrap()
++            .build().unwrap();
++        assert!(ov.matched("foo.html", false).is_whitelist());
++        assert!(ov.matched("foo.HTML", false).is_whitelist());
++        assert!(ov.matched("foo.htm", false).is_ignore());
++        assert!(ov.matched("foo.HTM", false).is_ignore());
++    }
++
++    #[test]
++    fn default_case_sensitive() {
++        let ov = OverrideBuilder::new(ROOT)
++            .add("*.html").unwrap()
++            .build().unwrap();
++        assert!(ov.matched("foo.html", false).is_whitelist());
++        assert!(ov.matched("foo.HTML", false).is_ignore());
++        assert!(ov.matched("foo.htm", false).is_ignore());
++        assert!(ov.matched("foo.HTM", false).is_ignore());
++    }
++}
diff --cc vendor/ignore-0.4.3/src/pathutil.rs
index 000000000,000000000..bfd43de3e
new file mode 100644
--- /dev/null
+++ b/vendor/ignore-0.4.3/src/pathutil.rs
@@@ -1,0 -1,0 +1,108 @@@
++use std::ffi::OsStr;
++use std::path::Path;
++
++/// Returns true if and only if this file path is considered to be hidden.
++#[cfg(unix)]
++pub fn is_hidden<P: AsRef<Path>>(path: P) -> bool {
++    use std::os::unix::ffi::OsStrExt;
++
++    if let Some(name) = file_name(path.as_ref()) {
++        name.as_bytes().get(0) == Some(&b'.')
++    } else {
++        false
++    }
++}
++
++/// Returns true if and only if this file path is considered to be hidden.
++#[cfg(not(unix))]
++pub fn is_hidden<P: AsRef<Path>>(path: P) -> bool {
++    if let Some(name) = file_name(path.as_ref()) {
++        name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
++    } else {
++        false
++    }
++}
++
++/// Strip `prefix` from the `path` and return the remainder.
++///
++/// If `path` doesn't have a prefix `prefix`, then return `None`.
++#[cfg(unix)]
++pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
++    prefix: &'a P,
++    path: &'a Path,
++) -> Option<&'a Path> {
++    use std::os::unix::ffi::OsStrExt;
++
++    let prefix = prefix.as_ref().as_os_str().as_bytes();
++    let path = path.as_os_str().as_bytes();
++    if prefix.len() > path.len() || prefix != &path[0..prefix.len()] {
++        None
++    } else {
++        Some(&Path::new(OsStr::from_bytes(&path[prefix.len()..])))
++    }
++}
++
++/// Strip `prefix` from the `path` and return the remainder.
++///
++/// If `path` doesn't have a prefix `prefix`, then return `None`.
++#[cfg(not(unix))]
++pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
++    prefix: &'a P,
++    path: &'a Path,
++) -> Option<&'a Path> {
++    path.strip_prefix(prefix).ok()
++}
++
++/// Returns true if this file path is just a file name. i.e., Its parent is
++/// the empty string.
++#[cfg(unix)]
++pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
++    use std::os::unix::ffi::OsStrExt;
++    use memchr::memchr;
++
++    let path = path.as_ref().as_os_str().as_bytes();
++    memchr(b'/', path).is_none()
++}
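The Unix variants above answer these questions on raw bytes for speed; the non-Unix fallbacks below lean on std. The intended semantics are the same either way, and can be made concrete with std alone (a small sketch, independent of this crate's private `pathutil` module):

```rust
use std::ffi::OsStr;
use std::path::Path;

fn main() {
    // A hidden file is one whose final component starts with a dot.
    assert_eq!(Path::new("src/.git").file_name(), Some(OsStr::new(".git")));
    // Paths terminating in `..` have no final component at all...
    assert_eq!(Path::new("src/..").file_name(), None);
    // ...and a bare file name is precisely a path whose parent is empty,
    // which is the condition `is_file_name` tests.
    assert_eq!(Path::new("foo.rs").parent(), Some(Path::new("")));
}
```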
++
++/// Returns true if this file path is just a file name. i.e., Its parent is
++/// the empty string.
++#[cfg(not(unix))]
++pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
++    path.as_ref().parent().map(|p| p.as_os_str().is_empty()).unwrap_or(false)
++}
++
++/// The final component of the path, if it is a normal file.
++///
++/// If the path terminates in ., .., or consists solely of a root or prefix,
++/// file_name will return None.
++#[cfg(unix)]
++pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
++    path: &'a P,
++) -> Option<&'a OsStr> {
++    use std::os::unix::ffi::OsStrExt;
++    use memchr::memrchr;
++
++    let path = path.as_ref().as_os_str().as_bytes();
++    if path.is_empty() {
++        return None;
++    } else if path.len() == 1 && path[0] == b'.' {
++        return None;
++    } else if path.last() == Some(&b'.') {
++        return None;
++    } else if path.len() >= 2 && &path[path.len() - 2..] == &b".."[..] {
++        return None;
++    }
++    let last_slash = memrchr(b'/', path).map(|i| i + 1).unwrap_or(0);
++    Some(OsStr::from_bytes(&path[last_slash..]))
++}
++
++/// The final component of the path, if it is a normal file.
++///
++/// If the path terminates in ., .., or consists solely of a root or prefix,
++/// file_name will return None.
++#[cfg(not(unix))]
++pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
++    path: &'a P,
++) -> Option<&'a OsStr> {
++    path.as_ref().file_name()
++}
diff --cc vendor/ignore-0.4.3/src/types.rs
index 000000000,000000000..8ecbb2b01
new file mode 100644
--- /dev/null
+++ b/vendor/ignore-0.4.3/src/types.rs
@@@ -1,0 -1,0 +1,781 @@@
++/*!
++The types module provides a way of associating globs on file names to file
++types.
++
++This can be used to match specific types of files. For example, among
++the default file types provided, the Rust file type is defined to be `*.rs`
++with name `rust`. Similarly, the C file type is defined to be `*.{c,h}` with
++name `c`.
++
++Note that the set of default types may change over time.
++
++# Example
++
++This shows how to create and use a simple file type matcher using the default
++file types defined in this crate.
++
++```
++use ignore::types::TypesBuilder;
++
++let mut builder = TypesBuilder::new();
++builder.add_defaults();
++builder.select("rust");
++let matcher = builder.build().unwrap();
++
++assert!(matcher.matched("foo.rs", false).is_whitelist());
++assert!(matcher.matched("foo.c", false).is_ignore());
++```
++
++# Example: negation
++
++This is like the previous example, but shows how negating a file type works.
++That is, this will let us match file paths that *don't* correspond to a
++particular file type.
++
++```
++use ignore::types::TypesBuilder;
++
++let mut builder = TypesBuilder::new();
++builder.add_defaults();
++builder.negate("c");
++let matcher = builder.build().unwrap();
++
++assert!(matcher.matched("foo.rs", false).is_none());
++assert!(matcher.matched("foo.c", false).is_ignore());
++```
++
++# Example: custom file type definitions
++
++This shows how to extend this library's default file type definitions with
++your own.
++
++```
++use ignore::types::TypesBuilder;
++
++let mut builder = TypesBuilder::new();
++builder.add_defaults();
++builder.add("foo", "*.foo");
++// Another way of adding a file type definition.
++// This is useful when accepting input from an end user.
++builder.add_def("bar:*.bar");
++// Note: we only select `foo`, not `bar`.
++builder.select("foo");
++let matcher = builder.build().unwrap();
++
++assert!(matcher.matched("x.foo", false).is_whitelist());
++// This is ignored because we only selected the `foo` file type.
++assert!(matcher.matched("x.bar", false).is_ignore()); ++``` ++ ++We can also add file type definitions based on other definitions. ++ ++``` ++use ignore::types::TypesBuilder; ++ ++let mut builder = TypesBuilder::new(); ++builder.add_defaults(); ++builder.add("foo", "*.foo"); ++builder.add_def("bar:include:foo,cpp"); ++builder.select("bar"); ++let matcher = builder.build().unwrap(); ++ ++assert!(matcher.matched("x.foo", false).is_whitelist()); ++assert!(matcher.matched("y.cpp", false).is_whitelist()); ++``` ++*/ ++ ++use std::cell::RefCell; ++use std::collections::HashMap; ++use std::path::Path; ++use std::sync::Arc; ++ ++use globset::{GlobBuilder, GlobSet, GlobSetBuilder}; ++use regex::Regex; ++use thread_local::ThreadLocal; ++ ++use pathutil::file_name; ++use {Error, Match}; ++ ++const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[ ++ ("agda", &["*.agda", "*.lagda"]), ++ ("aidl", &["*.aidl"]), ++ ("amake", &["*.mk", "*.bp"]), ++ ("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]), ++ ("asm", &["*.asm", "*.s", "*.S"]), ++ ("avro", &["*.avdl", "*.avpr", "*.avsc"]), ++ ("awk", &["*.awk"]), ++ ("bazel", &["*.bzl", "WORKSPACE", "BUILD"]), ++ ("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]), ++ ("bzip2", &["*.bz2"]), ++ ("c", &["*.c", "*.h", "*.H"]), ++ ("cabal", &["*.cabal"]), ++ ("cbor", &["*.cbor"]), ++ ("ceylon", &["*.ceylon"]), ++ ("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]), ++ ("cmake", &["*.cmake", "CMakeLists.txt"]), ++ ("coffeescript", &["*.coffee"]), ++ ("creole", &["*.creole"]), ++ ("config", &["*.cfg", "*.conf", "*.config", "*.ini"]), ++ ("cpp", &[ ++ "*.C", "*.cc", "*.cpp", "*.cxx", ++ "*.h", "*.H", "*.hh", "*.hpp", "*.hxx", "*.inl", ++ ]), ++ ("crystal", &["Projectfile", "*.cr"]), ++ ("cs", &["*.cs"]), ++ ("csharp", &["*.cs"]), ++ ("cshtml", &["*.cshtml"]), ++ ("css", &["*.css", "*.scss"]), ++ ("csv", &["*.csv"]), ++ ("cython", &["*.pyx"]), ++ ("dart", &["*.dart"]), ++ ("d", &["*.d"]), ++ ("docker", &["*Dockerfile*"]), ++ ("elisp", &["*.el"]), ++ ("elixir", &["*.ex", "*.eex", "*.exs"]), ++ ("elm", &["*.elm"]), ++ ("erlang", &["*.erl", "*.hrl"]), ++ ("fidl", &["*.fidl"]), ++ ("fish", &["*.fish"]), ++ ("fortran", &[ ++ "*.f", "*.F", "*.f77", "*.F77", "*.pfo", ++ "*.f90", "*.F90", "*.f95", "*.F95", ++ ]), ++ ("fsharp", &["*.fs", "*.fsx", "*.fsi"]), ++ ("gn", &["*.gn", "*.gni"]), ++ ("go", &["*.go"]), ++ ("gzip", &["*.gz"]), ++ ("groovy", &["*.groovy", "*.gradle"]), ++ ("h", &["*.h", "*.hpp"]), ++ ("hbs", &["*.hbs"]), ++ ("haskell", &["*.hs", "*.lhs"]), ++ ("hs", &["*.hs", "*.lhs"]), ++ ("html", &["*.htm", "*.html", "*.ejs"]), ++ ("java", &["*.java", "*.jsp"]), ++ ("jinja", &["*.j2", "*.jinja", "*.jinja2"]), ++ ("js", &[ ++ "*.js", "*.jsx", "*.vue", ++ ]), ++ ("json", &["*.json", "composer.lock"]), ++ ("jsonl", &["*.jsonl"]), ++ ("julia", &["*.jl"]), ++ ("jupyter", &["*.ipynb", "*.jpynb"]), ++ ("jl", &["*.jl"]), ++ ("kotlin", &["*.kt", "*.kts"]), ++ ("less", &["*.less"]), ++ ("license", &[ ++ // General ++ "COPYING", "COPYING[.-]*", ++ "COPYRIGHT", "COPYRIGHT[.-]*", ++ "EULA", "EULA[.-]*", ++ "licen[cs]e", "licen[cs]e.*", ++ "LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*", ++ "NOTICE", "NOTICE[.-]*", ++ "PATENTS", "PATENTS[.-]*", ++ "UNLICEN[CS]E", "UNLICEN[CS]E[.-]*", ++ // GPL (gpl.txt, etc.) ++ "agpl[.-]*", ++ "gpl[.-]*", ++ "lgpl[.-]*", ++ // Other license-specific (APACHE-2.0.txt, etc.) 
++ "AGPL-*[0-9]*", ++ "APACHE-*[0-9]*", ++ "BSD-*[0-9]*", ++ "CC-BY-*", ++ "GFDL-*[0-9]*", ++ "GNU-*[0-9]*", ++ "GPL-*[0-9]*", ++ "LGPL-*[0-9]*", ++ "MIT-*[0-9]*", ++ "MPL-*[0-9]*", ++ "OFL-*[0-9]*", ++ ]), ++ ("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]), ++ ("log", &["*.log"]), ++ ("lua", &["*.lua"]), ++ ("lzma", &["*.lzma"]), ++ ("lz4", &["*.lz4"]), ++ ("m4", &["*.ac", "*.m4"]), ++ ("make", &[ ++ "gnumakefile", "Gnumakefile", "GNUmakefile", ++ "makefile", "Makefile", ++ "*.mk", "*.mak" ++ ]), ++ ("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]), ++ ("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]), ++ ("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]), ++ ("matlab", &["*.m"]), ++ ("mk", &["mkfile"]), ++ ("ml", &["*.ml"]), ++ ("msbuild", &[ ++ "*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets" ++ ]), ++ ("nim", &["*.nim"]), ++ ("nix", &["*.nix"]), ++ ("objc", &["*.h", "*.m"]), ++ ("objcpp", &["*.h", "*.mm"]), ++ ("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]), ++ ("org", &["*.org"]), ++ ("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]), ++ ("pdf", &["*.pdf"]), ++ ("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]), ++ ("pod", &["*.pod"]), ++ ("protobuf", &["*.proto"]), ++ ("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]), ++ ("puppet", &["*.erb", "*.pp", "*.rb"]), ++ ("purs", &["*.purs"]), ++ ("py", &["*.py"]), ++ ("qmake", &["*.pro", "*.pri", "*.prf"]), ++ ("readme", &["README*", "*README"]), ++ ("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]), ++ ("rdoc", &["*.rdoc"]), ++ ("rst", &["*.rst"]), ++ ("ruby", &["Gemfile", "*.gemspec", ".irbrc", "Rakefile", "*.rb"]), ++ ("rust", &["*.rs"]), ++ ("sass", &["*.sass", "*.scss"]), ++ ("scala", &["*.scala"]), ++ ("sh", &[ ++ // Portable/misc. 
init files ++ ".login", ".logout", ".profile", "profile", ++ // bash-specific init files ++ ".bash_login", "bash_login", ++ ".bash_logout", "bash_logout", ++ ".bash_profile", "bash_profile", ++ ".bashrc", "bashrc", "*.bashrc", ++ // csh-specific init files ++ ".cshrc", "*.cshrc", ++ // ksh-specific init files ++ ".kshrc", "*.kshrc", ++ // tcsh-specific init files ++ ".tcshrc", ++ // zsh-specific init files ++ ".zshenv", "zshenv", ++ ".zlogin", "zlogin", ++ ".zlogout", "zlogout", ++ ".zprofile", "zprofile", ++ ".zshrc", "zshrc", ++ // Extensions ++ "*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh", ++ ]), ++ ("smarty", &["*.tpl"]), ++ ("sml", &["*.sml", "*.sig"]), ++ ("soy", &["*.soy"]), ++ ("spark", &["*.spark"]), ++ ("sql", &["*.sql", "*.psql"]), ++ ("stylus", &["*.styl"]), ++ ("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]), ++ ("svg", &["*.svg"]), ++ ("swift", &["*.swift"]), ++ ("swig", &["*.def", "*.i"]), ++ ("systemd", &[ ++ "*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path", ++ "*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target", ++ "*.timer", ++ ]), ++ ("taskpaper", &["*.taskpaper"]), ++ ("tcl", &["*.tcl"]), ++ ("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib"]), ++ ("textile", &["*.textile"]), ++ ("tf", &["*.tf"]), ++ ("ts", &["*.ts", "*.tsx"]), ++ ("txt", &["*.txt"]), ++ ("toml", &["*.toml", "Cargo.lock"]), ++ ("twig", &["*.twig"]), ++ ("vala", &["*.vala"]), ++ ("vb", &["*.vb"]), ++ ("verilog", &["*.v", "*.vh", "*.sv", "*.svh"]), ++ ("vhdl", &["*.vhd", "*.vhdl"]), ++ ("vim", &["*.vim"]), ++ ("vimscript", &["*.vim"]), ++ ("wiki", &["*.mediawiki", "*.wiki"]), ++ ("webidl", &["*.idl", "*.webidl", "*.widl"]), ++ ("xml", &["*.xml", "*.xml.dist"]), ++ ("xz", &["*.xz"]), ++ ("yacc", &["*.y"]), ++ ("yaml", &["*.yaml", "*.yml"]), ++ ("zsh", &[ ++ ".zshenv", "zshenv", ++ ".zlogin", "zlogin", ++ ".zlogout", "zlogout", ++ ".zprofile", "zprofile", ++ ".zshrc", "zshrc", ++ "*.zsh", ++ ]), ++]; ++ ++/// Glob represents a single glob in a set of file type definitions. ++/// ++/// There may be more than one glob for a particular file type. ++/// ++/// This is used to report information about the highest precedent glob ++/// that matched. ++/// ++/// Note that not all matches necessarily correspond to a specific glob. ++/// For example, if there are one or more selections and a file path doesn't ++/// match any of those selections, then the file path is considered to be ++/// ignored. ++/// ++/// The lifetime `'a` refers to the lifetime of the underlying file type ++/// definition, which corresponds to the lifetime of the file type matcher. ++#[derive(Clone, Debug)] ++pub struct Glob<'a>(GlobInner<'a>); ++ ++#[derive(Clone, Debug)] ++enum GlobInner<'a> { ++ /// No glob matched, but the file path should still be ignored. ++ UnmatchedIgnore, ++ /// A glob matched. ++ Matched { ++ /// The file type definition which provided the glob. ++ def: &'a FileTypeDef, ++ /// The index of the glob that matched inside the file type definition. ++ which: usize, ++ /// Whether the selection was negated or not. ++ negated: bool, ++ } ++} ++ ++impl<'a> Glob<'a> { ++ fn unmatched() -> Glob<'a> { ++ Glob(GlobInner::UnmatchedIgnore) ++ } ++} ++ ++/// A single file type definition. ++/// ++/// File type definitions can be retrieved in aggregate from a file type ++/// matcher. File type definitions are also reported when its responsible ++/// for a match. 
++#[derive(Clone, Debug, Eq, PartialEq)]
++pub struct FileTypeDef {
++    name: String,
++    globs: Vec<String>,
++}
++
++impl FileTypeDef {
++    /// Return the name of this file type.
++    pub fn name(&self) -> &str {
++        &self.name
++    }
++
++    /// Return the globs used to recognize this file type.
++    pub fn globs(&self) -> &[String] {
++        &self.globs
++    }
++}
++
++/// Types is a file type matcher.
++#[derive(Clone, Debug)]
++pub struct Types {
++    /// All of the file type definitions, sorted lexicographically by name.
++    defs: Vec<FileTypeDef>,
++    /// All of the selections made by the user.
++    selections: Vec<Selection<FileTypeDef>>,
++    /// Whether there is at least one Selection::Select in our selections.
++    /// When this is true, a Match::None is converted to Match::Ignore.
++    has_selected: bool,
++    /// A mapping from glob index in the set to two indices. The first is an
++    /// index into `selections` and the second is an index into the
++    /// corresponding file type definition's list of globs.
++    glob_to_selection: Vec<(usize, usize)>,
++    /// The set of all glob selections, used for actual matching.
++    set: GlobSet,
++    /// Temporary storage for globs that match.
++    matches: Arc<ThreadLocal<RefCell<Vec<usize>>>>,
++}
++
++/// Indicates the type of a selection for a particular file type.
++#[derive(Clone, Debug)]
++enum Selection<T> {
++    Select(String, T),
++    Negate(String, T),
++}
++
++impl<T> Selection<T> {
++    fn is_negated(&self) -> bool {
++        match *self {
++            Selection::Select(..) => false,
++            Selection::Negate(..) => true,
++        }
++    }
++
++    fn name(&self) -> &str {
++        match *self {
++            Selection::Select(ref name, _) => name,
++            Selection::Negate(ref name, _) => name,
++        }
++    }
++
++    fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Selection<U> {
++        match self {
++            Selection::Select(name, inner) => {
++                Selection::Select(name, f(inner))
++            }
++            Selection::Negate(name, inner) => {
++                Selection::Negate(name, f(inner))
++            }
++        }
++    }
++
++    fn inner(&self) -> &T {
++        match *self {
++            Selection::Select(_, ref inner) => inner,
++            Selection::Negate(_, ref inner) => inner,
++        }
++    }
++}
++
++impl Types {
++    /// Creates a new file type matcher that never matches any path and
++    /// contains no file type definitions.
++    pub fn empty() -> Types {
++        Types {
++            defs: vec![],
++            selections: vec![],
++            has_selected: false,
++            glob_to_selection: vec![],
++            set: GlobSetBuilder::new().build().unwrap(),
++            matches: Arc::new(ThreadLocal::default()),
++        }
++    }
++
++    /// Returns true if and only if this matcher has zero selections.
++    pub fn is_empty(&self) -> bool {
++        self.selections.is_empty()
++    }
++
++    /// Returns the number of selections used in this matcher.
++    pub fn len(&self) -> usize {
++        self.selections.len()
++    }
++
++    /// Return the set of current file type definitions.
++    ///
++    /// Definitions and globs are sorted.
++    pub fn definitions(&self) -> &[FileTypeDef] {
++        &self.defs
++    }
++
++    /// Returns a match for the given path against this file type matcher.
++    ///
++    /// The path is considered whitelisted if it matches a selected file type.
++    /// The path is considered ignored if it matches a negated file type.
++    /// If at least one file type is selected and `path` doesn't match, then
++    /// the path is also considered ignored.
++    pub fn matched<'a, P: AsRef<Path>>(
++        &'a self,
++        path: P,
++        is_dir: bool,
++    ) -> Match<Glob<'a>> {
++        // File types don't apply to directories, and we can't do anything
++        // if our glob set is empty.
++        if is_dir || self.set.is_empty() {
++            return Match::None;
++        }
++        // We only want to match against the file name, so extract it.
++        // If one doesn't exist, then we can't match it.
++        let name = match file_name(path.as_ref()) {
++            Some(name) => name,
++            None if self.has_selected => {
++                return Match::Ignore(Glob::unmatched());
++            }
++            None => {
++                return Match::None;
++            }
++        };
++        let mut matches = self.matches.get_default().borrow_mut();
++        self.set.matches_into(name, &mut *matches);
++        // The highest precedent match is the last one.
++        if let Some(&i) = matches.last() {
++            let (isel, iglob) = self.glob_to_selection[i];
++            let sel = &self.selections[isel];
++            let glob = Glob(GlobInner::Matched {
++                def: sel.inner(),
++                which: iglob,
++                negated: sel.is_negated(),
++            });
++            return if sel.is_negated() {
++                Match::Ignore(glob)
++            } else {
++                Match::Whitelist(glob)
++            };
++        }
++        if self.has_selected {
++            Match::Ignore(Glob::unmatched())
++        } else {
++            Match::None
++        }
++    }
++}
++
++/// TypesBuilder builds a type matcher from a set of file type definitions and
++/// a set of file type selections.
++pub struct TypesBuilder {
++    types: HashMap<String, FileTypeDef>,
++    selections: Vec<Selection<()>>,
++}
++
++impl TypesBuilder {
++    /// Create a new builder for a file type matcher.
++    ///
++    /// The builder contains *no* type definitions to start with. A set
++    /// of default type definitions can be added with `add_defaults`, and
++    /// additional type definitions can be added with `select` and `negate`.
++    pub fn new() -> TypesBuilder {
++        TypesBuilder {
++            types: HashMap::new(),
++            selections: vec![],
++        }
++    }
++
++    /// Build the current set of file type definitions *and* selections into
++    /// a file type matcher.
++    pub fn build(&self) -> Result<Types, Error> {
++        let defs = self.definitions();
++        let has_selected = self.selections.iter().any(|s| !s.is_negated());
++
++        let mut selections = vec![];
++        let mut glob_to_selection = vec![];
++        let mut build_set = GlobSetBuilder::new();
++        for (isel, selection) in self.selections.iter().enumerate() {
++            let def = match self.types.get(selection.name()) {
++                Some(def) => def.clone(),
++                None => {
++                    let name = selection.name().to_string();
++                    return Err(Error::UnrecognizedFileType(name));
++                }
++            };
++            for (iglob, glob) in def.globs.iter().enumerate() {
++                build_set.add(
++                    GlobBuilder::new(glob)
++                        .literal_separator(true)
++                        .build()
++                        .map_err(|err| {
++                            Error::Glob {
++                                glob: Some(glob.to_string()),
++                                err: err.kind().to_string(),
++                            }
++                        })?);
++                glob_to_selection.push((isel, iglob));
++            }
++            selections.push(selection.clone().map(move |_| def));
++        }
++        let set = build_set.build().map_err(|err| {
++            Error::Glob { glob: None, err: err.to_string() }
++        })?;
++        Ok(Types {
++            defs: defs,
++            selections: selections,
++            has_selected: has_selected,
++            glob_to_selection: glob_to_selection,
++            set: set,
++            matches: Arc::new(ThreadLocal::default()),
++        })
++    }
++
++    /// Return the set of current file type definitions.
++    ///
++    /// Definitions and globs are sorted.
++    pub fn definitions(&self) -> Vec<FileTypeDef> {
++        let mut defs = vec![];
++        for def in self.types.values() {
++            let mut def = def.clone();
++            def.globs.sort();
++            defs.push(def);
++        }
++        defs.sort_by(|def1, def2| def1.name().cmp(def2.name()));
++        defs
++    }
++
++    /// Select the file type given by `name`.
++    ///
++    /// If `name` is `all`, then all file types currently defined are selected.
++    pub fn select(&mut self, name: &str) -> &mut TypesBuilder {
++        if name == "all" {
++            for name in self.types.keys() {
++                self.selections.push(Selection::Select(name.to_string(), ()));
++            }
++        } else {
++            self.selections.push(Selection::Select(name.to_string(), ()));
++        }
++        self
++    }
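Taken together with `negate` (defined just below), selections drive the `matched` logic above: whitelist on a selected glob, ignore on a negated one, and ignore on no match at all once anything is selected. A brief hedged sketch of that interplay, using type names from the default definitions:

```rust
use ignore::types::TypesBuilder;

fn main() {
    let mut builder = TypesBuilder::new();
    builder.add_defaults();
    builder.select("rust");  // whitelist *.rs
    builder.negate("toml");  // ignore *.toml and Cargo.lock
    let types = builder.build().unwrap();

    assert!(types.matched("main.rs", false).is_whitelist());
    assert!(types.matched("Cargo.toml", false).is_ignore());
    // With at least one selection, non-matching paths are ignored too.
    assert!(types.matched("notes.txt", false).is_ignore());
}
```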
++
++    /// Ignore the file type given by `name`.
++    ///
++    /// If `name` is `all`, then all file types currently defined are negated.
++    pub fn negate(&mut self, name: &str) -> &mut TypesBuilder {
++        if name == "all" {
++            for name in self.types.keys() {
++                self.selections.push(Selection::Negate(name.to_string(), ()));
++            }
++        } else {
++            self.selections.push(Selection::Negate(name.to_string(), ()));
++        }
++        self
++    }
++
++    /// Clear any file type definitions for the type name given.
++    pub fn clear(&mut self, name: &str) -> &mut TypesBuilder {
++        self.types.remove(name);
++        self
++    }
++
++    /// Add a new file type definition. `name` can be arbitrary and `pat`
++    /// should be a glob recognizing file paths belonging to the `name` type.
++    ///
++    /// If `name` is `all` or otherwise contains any character that is not a
++    /// Unicode letter or number, then an error is returned.
++    pub fn add(&mut self, name: &str, glob: &str) -> Result<(), Error> {
++        lazy_static! {
++            static ref RE: Regex = Regex::new(r"^[\pL\pN]+$").unwrap();
++        };
++        if name == "all" || !RE.is_match(name) {
++            return Err(Error::InvalidDefinition);
++        }
++        let (key, glob) = (name.to_string(), glob.to_string());
++        self.types.entry(key).or_insert_with(|| {
++            FileTypeDef { name: name.to_string(), globs: vec![] }
++        }).globs.push(glob);
++        Ok(())
++    }
++
++    /// Add a new file type definition specified in string form. There are two
++    /// valid formats:
++    /// 1. `{name}:{glob}`. This defines a 'root' definition that associates the
++    ///    given name with the given glob.
++    /// 2. `{name}:include:{comma-separated list of already defined names}`.
++    ///    This defines an 'include' definition that associates the given name
++    ///    with the definitions of the given existing types.
++    /// Names may not include any characters that are not
++    /// Unicode letters or numbers.
++    pub fn add_def(&mut self, def: &str) -> Result<(), Error> {
++        let parts: Vec<&str> = def.split(':').collect();
++        match parts.len() {
++            2 => {
++                let name = parts[0];
++                let glob = parts[1];
++                if name.is_empty() || glob.is_empty() {
++                    return Err(Error::InvalidDefinition);
++                }
++                self.add(name, glob)
++            }
++            3 => {
++                let name = parts[0];
++                let types_string = parts[2];
++                if name.is_empty() || parts[1] != "include" || types_string.is_empty() {
++                    return Err(Error::InvalidDefinition);
++                }
++                let types = types_string.split(',');
++                // Check ahead of time to ensure that all types specified are
++                // present and fail fast if not.
++                if types.clone().any(|t| !self.types.contains_key(t)) {
++                    return Err(Error::InvalidDefinition);
++                }
++                for type_name in types {
++                    let globs = self.types.get(type_name).unwrap().globs.clone();
++                    for glob in globs {
++                        self.add(name, &glob)?;
++                    }
++                }
++                Ok(())
++            }
++            _ => Err(Error::InvalidDefinition)
++        }
++    }
++
++    /// Add a set of default file type definitions.
++    pub fn add_defaults(&mut self) -> &mut TypesBuilder {
++        static MSG: &'static str = "adding a default type should never fail";
++        for &(name, exts) in DEFAULT_TYPES {
++            for ext in exts {
++                self.add(name, ext).expect(MSG);
++            }
++        }
++        self
++    }
++}
++
++#[cfg(test)]
++mod tests {
++    use super::TypesBuilder;
++
++    macro_rules!
matched { ++ ($name:ident, $types:expr, $sel:expr, $selnot:expr, ++ $path:expr) => { ++ matched!($name, $types, $sel, $selnot, $path, true); ++ }; ++ (not, $name:ident, $types:expr, $sel:expr, $selnot:expr, ++ $path:expr) => { ++ matched!($name, $types, $sel, $selnot, $path, false); ++ }; ++ ($name:ident, $types:expr, $sel:expr, $selnot:expr, ++ $path:expr, $matched:expr) => { ++ #[test] ++ fn $name() { ++ let mut btypes = TypesBuilder::new(); ++ for tydef in $types { ++ btypes.add_def(tydef).unwrap(); ++ } ++ for sel in $sel { ++ btypes.select(sel); ++ } ++ for selnot in $selnot { ++ btypes.negate(selnot); ++ } ++ let types = btypes.build().unwrap(); ++ let mat = types.matched($path, false); ++ assert_eq!($matched, !mat.is_ignore()); ++ } ++ }; ++ } ++ ++ fn types() -> Vec<&'static str> { ++ vec![ ++ "html:*.html", ++ "html:*.htm", ++ "rust:*.rs", ++ "js:*.js", ++ "foo:*.{rs,foo}", ++ "combo:include:html,rust" ++ ] ++ } ++ ++ matched!(match1, types(), vec!["rust"], vec![], "lib.rs"); ++ matched!(match2, types(), vec!["html"], vec![], "index.html"); ++ matched!(match3, types(), vec!["html"], vec![], "index.htm"); ++ matched!(match4, types(), vec!["html", "rust"], vec![], "main.rs"); ++ matched!(match5, types(), vec![], vec![], "index.html"); ++ matched!(match6, types(), vec![], vec!["rust"], "index.html"); ++ matched!(match7, types(), vec!["foo"], vec!["rust"], "main.foo"); ++ matched!(match8, types(), vec!["combo"], vec![], "index.html"); ++ matched!(match9, types(), vec!["combo"], vec![], "lib.rs"); ++ ++ matched!(not, matchnot1, types(), vec!["rust"], vec![], "index.html"); ++ matched!(not, matchnot2, types(), vec![], vec!["rust"], "main.rs"); ++ matched!(not, matchnot3, types(), vec!["foo"], vec!["rust"], "main.rs"); ++ matched!(not, matchnot4, types(), vec!["rust"], vec!["foo"], "main.rs"); ++ matched!(not, matchnot5, types(), vec!["rust"], vec!["foo"], "main.foo"); ++ matched!(not, matchnot6, types(), vec!["combo"], vec![], "leftpad.js"); ++ ++ #[test] ++ fn test_invalid_defs() { ++ let mut btypes = TypesBuilder::new(); ++ for tydef in types() { ++ btypes.add_def(tydef).unwrap(); ++ } ++ // Preserve the original definitions for later comparison. ++ let original_defs = btypes.definitions(); ++ let bad_defs = vec![ ++ // Reference to type that does not exist ++ "combo:include:html,python", ++ // Bad format ++ "combo:foobar:html,rust", ++ "" ++ ]; ++ for def in bad_defs { ++ assert!(btypes.add_def(def).is_err()); ++ // Ensure that nothing changed, even if some of the includes were valid. ++ assert_eq!(btypes.definitions(), original_defs); ++ } ++ } ++} diff --cc vendor/ignore-0.4.3/src/walk.rs index 000000000,000000000..fc36b4e25 new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/src/walk.rs @@@ -1,0 -1,0 +1,1771 @@@ ++use std::cmp; ++use std::ffi::OsStr; ++use std::fmt; ++use std::fs::{self, FileType, Metadata}; ++use std::io; ++use std::path::{Path, PathBuf}; ++use std::sync::Arc; ++use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; ++use std::thread; ++use std::time::Duration; ++use std::vec; ++ ++use crossbeam::sync::MsQueue; ++use same_file::Handle; ++use walkdir::{self, WalkDir}; ++ ++use dir::{Ignore, IgnoreBuilder}; ++use gitignore::GitignoreBuilder; ++use overrides::Override; ++use types::Types; ++use {Error, PartialErrorBuilder}; ++ ++/// A directory entry with a possible error attached. ++/// ++/// The error typically refers to a problem parsing ignore files in a ++/// particular directory. 
++#[derive(Clone, Debug)]
++pub struct DirEntry {
++    dent: DirEntryInner,
++    err: Option<Error>,
++}
++
++impl DirEntry {
++    /// The full path that this entry represents.
++    pub fn path(&self) -> &Path {
++        self.dent.path()
++    }
++
++    /// Whether this entry corresponds to a symbolic link or not.
++    pub fn path_is_symlink(&self) -> bool {
++        self.dent.path_is_symlink()
++    }
++
++    /// Returns true if and only if this entry corresponds to stdin.
++    ///
++    /// i.e., The entry has depth 0 and its file name is `-`.
++    pub fn is_stdin(&self) -> bool {
++        self.dent.is_stdin()
++    }
++
++    /// Return the metadata for the file that this entry points to.
++    pub fn metadata(&self) -> Result<Metadata, Error> {
++        self.dent.metadata()
++    }
++
++    /// Return the file type for the file that this entry points to.
++    ///
++    /// This entry doesn't have a file type if it corresponds to stdin.
++    pub fn file_type(&self) -> Option<FileType> {
++        self.dent.file_type()
++    }
++
++    /// Return the file name of this entry.
++    ///
++    /// If this entry has no file name (e.g., `/`), then the full path is
++    /// returned.
++    pub fn file_name(&self) -> &OsStr {
++        self.dent.file_name()
++    }
++
++    /// Returns the depth at which this entry was created relative to the root.
++    pub fn depth(&self) -> usize {
++        self.dent.depth()
++    }
++
++    /// Returns the underlying inode number if one exists.
++    ///
++    /// If this entry doesn't have an inode number, then `None` is returned.
++    #[cfg(unix)]
++    pub fn ino(&self) -> Option<u64> {
++        self.dent.ino()
++    }
++
++    /// Returns an error, if one exists, associated with processing this entry.
++    ///
++    /// An example of an error is one that occurred while parsing an ignore
++    /// file.
++    pub fn error(&self) -> Option<&Error> {
++        self.err.as_ref()
++    }
++
++    /// Returns true if and only if this entry points to a directory.
++    fn is_dir(&self) -> bool {
++        self.dent.is_dir()
++    }
++
++    fn new_stdin() -> DirEntry {
++        DirEntry {
++            dent: DirEntryInner::Stdin,
++            err: None,
++        }
++    }
++
++    fn new_walkdir(dent: walkdir::DirEntry, err: Option<Error>) -> DirEntry {
++        DirEntry {
++            dent: DirEntryInner::Walkdir(dent),
++            err: err,
++        }
++    }
++
++    fn new_raw(dent: DirEntryRaw, err: Option<Error>) -> DirEntry {
++        DirEntry {
++            dent: DirEntryInner::Raw(dent),
++            err: err,
++        }
++    }
++}
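In practice these accessors are consumed through the `Walk` iterator shown in the crate docs; a minimal sketch of a consumer (paths printed depend on the machine):

```rust,no_run
use ignore::Walk;

fn main() {
    for result in Walk::new("./") {
        match result {
            Ok(entry) => {
                // depth() is 0 for a root, 1 for its children, and so on.
                println!("{} (depth {})", entry.path().display(), entry.depth());
                // A per-entry error (e.g. an unparseable ignore file) does
                // not stop the walk; it rides along on the entry.
                if let Some(err) = entry.error() {
                    eprintln!("warning: {}", err);
                }
            }
            Err(err) => eprintln!("ERROR: {}", err),
        }
    }
}
```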
++
++/// DirEntryInner is the implementation of DirEntry.
++///
++/// It specifically represents three distinct sources of directory entries:
++///
++/// 1. From the walkdir crate.
++/// 2. Special entries that represent things like stdin.
++/// 3. From a path.
++///
++/// Specifically, (3) has to essentially re-create the DirEntry implementation
++/// from WalkDir.
++#[derive(Clone, Debug)]
++enum DirEntryInner {
++    Stdin,
++    Walkdir(walkdir::DirEntry),
++    Raw(DirEntryRaw),
++}
++
++impl DirEntryInner {
++    fn path(&self) -> &Path {
++        use self::DirEntryInner::*;
++        match *self {
++            Stdin => Path::new("<stdin>"),
++            Walkdir(ref x) => x.path(),
++            Raw(ref x) => x.path(),
++        }
++    }
++
++    fn path_is_symlink(&self) -> bool {
++        use self::DirEntryInner::*;
++        match *self {
++            Stdin => false,
++            Walkdir(ref x) => x.path_is_symlink(),
++            Raw(ref x) => x.path_is_symlink(),
++        }
++    }
++
++    fn is_stdin(&self) -> bool {
++        match *self {
++            DirEntryInner::Stdin => true,
++            _ => false,
++        }
++    }
++
++    fn metadata(&self) -> Result<Metadata, Error> {
++        use self::DirEntryInner::*;
++        match *self {
++            Stdin => {
++                let err = Error::Io(io::Error::new(
++                    io::ErrorKind::Other, "<stdin> has no metadata"));
++                Err(err.with_path("<stdin>"))
++            }
++            Walkdir(ref x) => {
++                x.metadata().map_err(|err| {
++                    Error::Io(io::Error::from(err)).with_path(x.path())
++                })
++            }
++            Raw(ref x) => x.metadata(),
++        }
++    }
++
++    fn file_type(&self) -> Option<FileType> {
++        use self::DirEntryInner::*;
++        match *self {
++            Stdin => None,
++            Walkdir(ref x) => Some(x.file_type()),
++            Raw(ref x) => Some(x.file_type()),
++        }
++    }
++
++    fn file_name(&self) -> &OsStr {
++        use self::DirEntryInner::*;
++        match *self {
++            Stdin => OsStr::new("<stdin>"),
++            Walkdir(ref x) => x.file_name(),
++            Raw(ref x) => x.file_name(),
++        }
++    }
++
++    fn depth(&self) -> usize {
++        use self::DirEntryInner::*;
++        match *self {
++            Stdin => 0,
++            Walkdir(ref x) => x.depth(),
++            Raw(ref x) => x.depth(),
++        }
++    }
++
++    #[cfg(unix)]
++    fn ino(&self) -> Option<u64> {
++        use walkdir::DirEntryExt;
++        use self::DirEntryInner::*;
++        match *self {
++            Stdin => None,
++            Walkdir(ref x) => Some(x.ino()),
++            Raw(ref x) => Some(x.ino()),
++        }
++    }
++
++    /// Returns true if and only if this entry points to a directory.
++    ///
++    /// This works around a bug in Rust's standard library:
++    /// https://github.com/rust-lang/rust/issues/46484
++    #[cfg(windows)]
++    fn is_dir(&self) -> bool {
++        self.metadata().map(|md| metadata_is_dir(&md)).unwrap_or(false)
++    }
++
++    /// Returns true if and only if this entry points to a directory.
++    ///
++    /// This works around a bug in Rust's standard library:
++    /// https://github.com/rust-lang/rust/issues/46484
++    #[cfg(not(windows))]
++    fn is_dir(&self) -> bool {
++        self.file_type().map(|ft| ft.is_dir()).unwrap_or(false)
++    }
++}
++
++/// DirEntryRaw is essentially copied from the walkdir crate so that we can
++/// build `DirEntry`s from whole cloth in the parallel iterator.
++#[derive(Clone)]
++struct DirEntryRaw {
++    /// The path as reported by the `fs::ReadDir` iterator (even if it's a
++    /// symbolic link).
++    path: PathBuf,
++    /// The file type. Necessary for recursive iteration, so store it.
++    ty: FileType,
++    /// Is set when this entry was created from a symbolic link and the user
++    /// expects the iterator to follow symbolic links.
++    follow_link: bool,
++    /// The depth at which this entry was generated relative to the root.
++    depth: usize,
++    /// The underlying inode number (Unix only).
++    #[cfg(unix)]
++    ino: u64,
++    /// The underlying metadata (Windows only). We store this on Windows
++    /// because this comes for free while reading a directory.
++    ///
++    /// We use this to determine whether an entry is a directory or not, which
++    /// works around a bug in Rust's standard library:
++    /// https://github.com/rust-lang/rust/issues/46484
++    #[cfg(windows)]
++    metadata: fs::Metadata,
++}
++
++impl fmt::Debug for DirEntryRaw {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        // Leaving out FileType because it doesn't have a debug impl
++        // in Rust 1.9. We could add it if we really wanted to by manually
++        // querying each possible file type. Meh. ---AG
++        f.debug_struct("DirEntryRaw")
++            .field("path", &self.path)
++            .field("follow_link", &self.follow_link)
++            .field("depth", &self.depth)
++            .finish()
++    }
++}
++
++impl DirEntryRaw {
++    fn path(&self) -> &Path {
++        &self.path
++    }
++
++    fn path_is_symlink(&self) -> bool {
++        self.ty.is_symlink() || self.follow_link
++    }
++
++    fn metadata(&self) -> Result<Metadata, Error> {
++        self.metadata_internal()
++    }
++
++    #[cfg(windows)]
++    fn metadata_internal(&self) -> Result<fs::Metadata, Error> {
++        if self.follow_link {
++            fs::metadata(&self.path)
++        } else {
++            Ok(self.metadata.clone())
++        }.map_err(|err| Error::Io(io::Error::from(err)).with_path(&self.path))
++    }
++
++    #[cfg(not(windows))]
++    fn metadata_internal(&self) -> Result<fs::Metadata, Error> {
++        if self.follow_link {
++            fs::metadata(&self.path)
++        } else {
++            fs::symlink_metadata(&self.path)
++        }.map_err(|err| Error::Io(io::Error::from(err)).with_path(&self.path))
++    }
++
++    fn file_type(&self) -> FileType {
++        self.ty
++    }
++
++    fn file_name(&self) -> &OsStr {
++        self.path.file_name().unwrap_or_else(|| self.path.as_os_str())
++    }
++
++    fn depth(&self) -> usize {
++        self.depth
++    }
++
++    #[cfg(unix)]
++    fn ino(&self) -> u64 {
++        self.ino
++    }
++
++    fn from_entry(
++        depth: usize,
++        ent: &fs::DirEntry,
++    ) -> Result<DirEntryRaw, Error> {
++        let ty = ent.file_type().map_err(|err| {
++            let err = Error::Io(io::Error::from(err)).with_path(ent.path());
++            Error::WithDepth {
++                depth: depth,
++                err: Box::new(err),
++            }
++        })?;
++        DirEntryRaw::from_entry_os(depth, ent, ty)
++    }
++
++    #[cfg(windows)]
++    fn from_entry_os(
++        depth: usize,
++        ent: &fs::DirEntry,
++        ty: fs::FileType,
++    ) -> Result<DirEntryRaw, Error> {
++        let md = ent.metadata().map_err(|err| {
++            let err = Error::Io(io::Error::from(err)).with_path(ent.path());
++            Error::WithDepth {
++                depth: depth,
++                err: Box::new(err),
++            }
++        })?;
++        Ok(DirEntryRaw {
++            path: ent.path(),
++            ty: ty,
++            follow_link: false,
++            depth: depth,
++            metadata: md,
++        })
++    }
++
++    #[cfg(unix)]
++    fn from_entry_os(
++        depth: usize,
++        ent: &fs::DirEntry,
++        ty: fs::FileType,
++    ) -> Result<DirEntryRaw, Error> {
++        use std::os::unix::fs::DirEntryExt;
++
++        Ok(DirEntryRaw {
++            path: ent.path(),
++            ty: ty,
++            follow_link: false,
++            depth: depth,
++            ino: ent.ino(),
++        })
++    }
++
++    #[cfg(not(unix))]
++    fn from_link(depth: usize, pb: PathBuf) -> Result<DirEntryRaw, Error> {
++        let md = fs::metadata(&pb).map_err(|err| {
++            Error::Io(err).with_path(&pb)
++        })?;
++        Ok(DirEntryRaw {
++            path: pb,
++            ty: md.file_type(),
++            follow_link: true,
++            depth: depth,
++            metadata: md,
++        })
++    }
++
++    #[cfg(unix)]
++    fn from_link(depth: usize, pb: PathBuf) -> Result<DirEntryRaw, Error> {
++        use std::os::unix::fs::MetadataExt;
++
++        let md = fs::metadata(&pb).map_err(|err| {
++            Error::Io(err).with_path(&pb)
++        })?;
++        Ok(DirEntryRaw {
++            path: pb,
++            ty: md.file_type(),
++            follow_link: true,
++            depth: depth,
++            ino: md.ino(),
++        })
++    }
++}
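`DirEntryRaw` exists precisely so the parallel walker (configured by the builder below) can mint entries itself. That parallel API is driven with a closure factory rather than an `Iterator`; a hedged sketch of the calling convention, assuming the boxed-callback form of `run` in this release:

```rust,no_run
use ignore::{WalkBuilder, WalkState};

fn main() {
    let walker = WalkBuilder::new("./").threads(4).build_parallel();
    walker.run(|| {
        // `run` asks for a factory: one fresh callback per worker thread.
        Box::new(|result| {
            if let Ok(entry) = result {
                println!("{}", entry.path().display());
            }
            WalkState::Continue
        })
    });
}
```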
This includes ++/// specific glob overrides, file type matching, toggling whether hidden ++/// files are ignored or not, and of course, support for respecting gitignore ++/// files. ++/// ++/// By default, all ignore files found are respected. This includes `.ignore`, ++/// `.gitignore`, `.git/info/exclude` and even your global gitignore ++/// globs, usually found in `$XDG_CONFIG_HOME/git/ignore`. ++/// ++/// Some standard recursive directory options are also supported, such as ++/// limiting the recursive depth or whether to follow symbolic links (disabled ++/// by default). ++/// ++/// # Ignore rules ++/// ++/// There are many rules that influence whether a particular file or directory ++/// is skipped by this iterator. Those rules are documented here. Note that ++/// the rules assume a default configuration. ++/// ++/// * First, glob overrides are checked. If a path matches a glob override, ++/// then matching stops. The path is then only skipped if the glob that matched ++/// the path is an ignore glob. (An override glob is a whitelist glob unless it ++/// starts with a `!`, in which case it is an ignore glob.) ++/// * Second, ignore files are checked. Ignore files currently only come from ++/// git ignore files (`.gitignore`, `.git/info/exclude` and the configured ++/// global gitignore file), plain `.ignore` files, which have the same format ++/// as gitignore files, or explicitly added ignore files. The precedence order ++/// is: `.ignore`, `.gitignore`, `.git/info/exclude`, global gitignore and ++/// finally explicitly added ignore files. Note that precedence between ++/// different types of ignore files is not impacted by the directory hierarchy; ++/// any `.ignore` file overrides all `.gitignore` files. Within each precedence ++/// level, more nested ignore files have a higher precedence than less nested ++/// ignore files. ++/// * Third, if the previous step yields an ignore match, then all matching ++/// is stopped and the path is skipped. If it yields a whitelist match, then ++/// matching continues. A whitelist match can be overridden by a later matcher. ++/// * Fourth, unless the path is a directory, the file type matcher is run on ++/// the path. As above, if it yields an ignore match, then all matching is ++/// stopped and the path is skipped. If it yields a whitelist match, then ++/// matching continues. ++/// * Fifth, if the path hasn't been whitelisted and it is hidden, then the ++/// path is skipped. ++/// * Sixth, unless the path is a directory, the size of the file is compared ++/// against the max filesize limit. If it exceeds the limit, it is skipped. ++/// * Seventh, if the path has made it this far then it is yielded in the ++/// iterator. ++#[derive(Clone)] ++pub struct WalkBuilder { ++ paths: Vec, ++ ig_builder: IgnoreBuilder, ++ max_depth: Option, ++ max_filesize: Option, ++ follow_links: bool, ++ sorter: Option cmp::Ordering + Send + Sync + 'static ++ >>, ++ threads: usize, ++} ++ ++impl fmt::Debug for WalkBuilder { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ f.debug_struct("WalkBuilder") ++ .field("paths", &self.paths) ++ .field("ig_builder", &self.ig_builder) ++ .field("max_depth", &self.max_depth) ++ .field("max_filesize", &self.max_filesize) ++ .field("follow_links", &self.follow_links) ++ .field("threads", &self.threads) ++ .finish() ++ } ++} ++ ++impl WalkBuilder { ++ /// Create a new builder for a recursive directory iterator for the ++ /// directory given. 
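++///
++/// # Example
++///
++/// A minimal sketch of the builder in use (added here for illustration, not
++/// upstream prose); it assumes a `./src` directory exists:
++///
++/// ```no_run
++/// use ignore::WalkBuilder;
++///
++/// // Walk ./src at most two levels deep, respecting ignore files.
++/// for result in WalkBuilder::new("./src").max_depth(Some(2)).build() {
++///     match result {
++///         Ok(entry) => println!("{}", entry.path().display()),
++///         Err(err) => eprintln!("ERROR: {}", err),
++///     }
++/// }
++/// ```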
++#[derive(Clone)]
++pub struct WalkBuilder {
++    paths: Vec<PathBuf>,
++    ig_builder: IgnoreBuilder,
++    max_depth: Option<usize>,
++    max_filesize: Option<u64>,
++    follow_links: bool,
++    sorter: Option<Arc<
++        Fn(&OsStr, &OsStr) -> cmp::Ordering + Send + Sync + 'static
++    >>,
++    threads: usize,
++}
++
++impl fmt::Debug for WalkBuilder {
++    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++        f.debug_struct("WalkBuilder")
++            .field("paths", &self.paths)
++            .field("ig_builder", &self.ig_builder)
++            .field("max_depth", &self.max_depth)
++            .field("max_filesize", &self.max_filesize)
++            .field("follow_links", &self.follow_links)
++            .field("threads", &self.threads)
++            .finish()
++    }
++}
++
++impl WalkBuilder {
++    /// Create a new builder for a recursive directory iterator for the
++    /// directory given.
++    ///
++    /// Note that if you want to traverse multiple different directories, it
++    /// is better to call `add` on this builder than to create multiple
++    /// `Walk` values.
++    pub fn new<P: AsRef<Path>>(path: P) -> WalkBuilder {
++        WalkBuilder {
++            paths: vec![path.as_ref().to_path_buf()],
++            ig_builder: IgnoreBuilder::new(),
++            max_depth: None,
++            max_filesize: None,
++            follow_links: false,
++            sorter: None,
++            threads: 0,
++        }
++    }
++
++    /// Build a new `Walk` iterator.
++    pub fn build(&self) -> Walk {
++        let follow_links = self.follow_links;
++        let max_depth = self.max_depth;
++        let cmp = self.sorter.clone();
++        let its = self.paths.iter().map(move |p| {
++            if p == Path::new("-") {
++                (p.to_path_buf(), None)
++            } else {
++                let mut wd = WalkDir::new(p);
++                wd = wd.follow_links(follow_links || path_is_file(p));
++                if let Some(max_depth) = max_depth {
++                    wd = wd.max_depth(max_depth);
++                }
++                if let Some(ref cmp) = cmp {
++                    let cmp = cmp.clone();
++                    wd = wd.sort_by(move |a, b| {
++                        cmp(a.file_name(), b.file_name())
++                    });
++                }
++                (p.to_path_buf(), Some(WalkEventIter::from(wd)))
++            }
++        }).collect::<Vec<_>>().into_iter();
++        let ig_root = self.ig_builder.build();
++        Walk {
++            its: its,
++            it: None,
++            ig_root: ig_root.clone(),
++            ig: ig_root.clone(),
++            max_filesize: self.max_filesize,
++        }
++    }
++
++    /// Build a new `WalkParallel` iterator.
++    ///
++    /// Note that this *doesn't* return something that implements `Iterator`.
++    /// Instead, the returned value must be run with a closure. e.g.,
++    /// `builder.build_parallel().run(|| |path| println!("{:?}", path))`.
++    pub fn build_parallel(&self) -> WalkParallel {
++        WalkParallel {
++            paths: self.paths.clone().into_iter(),
++            ig_root: self.ig_builder.build(),
++            max_depth: self.max_depth,
++            max_filesize: self.max_filesize,
++            follow_links: self.follow_links,
++            threads: self.threads,
++        }
++    }
++
++    /// Add a file path to the iterator.
++    ///
++    /// Each additional file path added is traversed recursively. This should
++    /// be preferred over building multiple `Walk` iterators since this
++    /// enables reusing resources across iteration.
++    pub fn add<P: AsRef<Path>>(&mut self, path: P) -> &mut WalkBuilder {
++        self.paths.push(path.as_ref().to_path_buf());
++        self
++    }
++
++    /// The maximum depth to recurse.
++    ///
++    /// The default, `None`, imposes no depth restriction.
++    pub fn max_depth(&mut self, depth: Option<usize>) -> &mut WalkBuilder {
++        self.max_depth = depth;
++        self
++    }
++
++    /// Whether to follow symbolic links or not.
++    pub fn follow_links(&mut self, yes: bool) -> &mut WalkBuilder {
++        self.follow_links = yes;
++        self
++    }
++
++    /// Whether to ignore files above the specified limit.
++    pub fn max_filesize(&mut self, filesize: Option<u64>) -> &mut WalkBuilder {
++        self.max_filesize = filesize;
++        self
++    }
++
++    /// The number of threads to use for traversal.
++    ///
++    /// Note that this only has an effect when using `build_parallel`.
++    ///
++    /// The default setting is `0`, which chooses the number of threads
++    /// automatically using heuristics.
++    pub fn threads(&mut self, n: usize) -> &mut WalkBuilder {
++        self.threads = n;
++        self
++    }
++
++    /// Add a global ignore file to the matcher.
++    ///
++    /// This has lower precedence than all other sources of ignore rules.
++    ///
++    /// If there was a problem adding the ignore file, then an error is
++    /// returned. Note that the error may indicate *partial* failure. For
++    /// example, if an ignore file contains an invalid glob, all other globs
++    /// are still applied.
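++    ///
++    /// # Example
++    ///
++    /// A short sketch (added for illustration; the `.my-ignore` file name is
++    /// hypothetical) of checking the returned `Option<Error>`:
++    ///
++    /// ```no_run
++    /// use ignore::WalkBuilder;
++    ///
++    /// let mut builder = WalkBuilder::new("./");
++    /// // A `Some(err)` may describe only a partial failure, so report it
++    /// // and keep going rather than aborting the walk.
++    /// if let Some(err) = builder.add_ignore(".my-ignore") {
++    ///     eprintln!("problem with ignore file: {}", err);
++    /// }
++    /// ```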
++    pub fn add_ignore<P: AsRef<Path>>(&mut self, path: P) -> Option<Error> {
++        let mut builder = GitignoreBuilder::new("");
++        let mut errs = PartialErrorBuilder::default();
++        errs.maybe_push(builder.add(path));
++        match builder.build() {
++            Ok(gi) => { self.ig_builder.add_ignore(gi); }
++            Err(err) => { errs.push(err); }
++        }
++        errs.into_error_option()
++    }
++
++    /// Add a custom ignore file name.
++    ///
++    /// These ignore files have higher precedence than all other ignore files.
++    ///
++    /// When specifying multiple names, earlier names have lower precedence than
++    /// later names.
++    pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
++        &mut self,
++        file_name: S
++    ) -> &mut WalkBuilder {
++        self.ig_builder.add_custom_ignore_filename(file_name);
++        self
++    }
++
++    /// Add an override matcher.
++    ///
++    /// By default, no override matcher is used.
++    ///
++    /// This overrides any previous setting.
++    pub fn overrides(&mut self, overrides: Override) -> &mut WalkBuilder {
++        self.ig_builder.overrides(overrides);
++        self
++    }
++
++    /// Add a file type matcher.
++    ///
++    /// By default, no file type matcher is used.
++    ///
++    /// This overrides any previous setting.
++    pub fn types(&mut self, types: Types) -> &mut WalkBuilder {
++        self.ig_builder.types(types);
++        self
++    }
++
++    /// Enables all the standard ignore filters.
++    ///
++    /// This toggles, as a group, all the filters that are enabled by default:
++    ///
++    /// - [hidden()](#method.hidden)
++    /// - [parents()](#method.parents)
++    /// - [ignore()](#method.ignore)
++    /// - [git_ignore()](#method.git_ignore)
++    /// - [git_global()](#method.git_global)
++    /// - [git_exclude()](#method.git_exclude)
++    ///
++    /// They may still be toggled individually after calling this function.
++    ///
++    /// This is (by definition) enabled by default.
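++    ///
++    /// # Example
++    ///
++    /// A small sketch (an illustrative addition, not upstream prose): walk
++    /// everything, including hidden and ignored files, by disabling the
++    /// whole group at once:
++    ///
++    /// ```no_run
++    /// use ignore::WalkBuilder;
++    ///
++    /// let mut builder = WalkBuilder::new("./");
++    /// // Turn off hidden-file filtering and all ignore-file handling.
++    /// builder.standard_filters(false);
++    /// for result in builder.build() {
++    ///     if let Ok(entry) = result {
++    ///         println!("{}", entry.path().display());
++    ///     }
++    /// }
++    /// ```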
++    pub fn standard_filters(&mut self, yes: bool) -> &mut WalkBuilder {
++        self.hidden(yes)
++            .parents(yes)
++            .ignore(yes)
++            .git_ignore(yes)
++            .git_global(yes)
++            .git_exclude(yes)
++    }
++
++    /// Enables ignoring hidden files.
++    ///
++    /// This is enabled by default.
++    pub fn hidden(&mut self, yes: bool) -> &mut WalkBuilder {
++        self.ig_builder.hidden(yes);
++        self
++    }
++
++    /// Enables reading ignore files from parent directories.
++    ///
++    /// If this is enabled, then .gitignore files in parent directories of each
++    /// file path given are respected. Otherwise, they are ignored.
++    ///
++    /// This is enabled by default.
++    pub fn parents(&mut self, yes: bool) -> &mut WalkBuilder {
++        self.ig_builder.parents(yes);
++        self
++    }
++
++    /// Enables reading `.ignore` files.
++    ///
++    /// `.ignore` files have the same semantics as `gitignore` files and are
++    /// supported by search tools such as ripgrep and The Silver Searcher.
++    ///
++    /// This is enabled by default.
++    pub fn ignore(&mut self, yes: bool) -> &mut WalkBuilder {
++        self.ig_builder.ignore(yes);
++        self
++    }
++
++    /// Enables reading a global gitignore file, whose path is specified in
++    /// git's `core.excludesFile` config option.
++    ///
++    /// Git's config file location is `$HOME/.gitconfig`. If `$HOME/.gitconfig`
++    /// does not exist or does not specify `core.excludesFile`, then
++    /// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
++    /// set or is empty, then `$HOME/.config/git/ignore` is used instead.
++    ///
++    /// This is enabled by default.
++    pub fn git_global(&mut self, yes: bool) -> &mut WalkBuilder {
++        self.ig_builder.git_global(yes);
++        self
++    }
++
++    /// Enables reading `.gitignore` files.
++    ///
++    /// `.gitignore` files have match semantics as described in the `gitignore`
++    /// man page.
++    ///
++    /// This is enabled by default.
++    pub fn git_ignore(&mut self, yes: bool) -> &mut WalkBuilder {
++        self.ig_builder.git_ignore(yes);
++        self
++    }
++
++    /// Enables reading `.git/info/exclude` files.
++    ///
++    /// `.git/info/exclude` files have match semantics as described in the
++    /// `gitignore` man page.
++    ///
++    /// This is enabled by default.
++    pub fn git_exclude(&mut self, yes: bool) -> &mut WalkBuilder {
++        self.ig_builder.git_exclude(yes);
++        self
++    }
++
++    /// Set a function for sorting directory entries by file name.
++    ///
++    /// If a compare function is set, the resulting iterator will return all
++    /// paths in sorted order. The compare function will be called to compare
++    /// names from entries from the same directory using only the name of the
++    /// entry.
++    ///
++    /// Note that this is not used in the parallel iterator.
++    pub fn sort_by_file_name<F>(&mut self, cmp: F) -> &mut WalkBuilder
++            where F: Fn(&OsStr, &OsStr) -> cmp::Ordering + Send + Sync + 'static
++    {
++        self.sorter = Some(Arc::new(cmp));
++        self
++    }
++}
++
++/// Walk is a recursive directory iterator over file paths in one or more
++/// directories.
++///
++/// Only file and directory paths matching the rules are returned. By default,
++/// ignore files like `.gitignore` are respected. The precise matching rules
++/// and precedence is explained in the documentation for `WalkBuilder`.
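++///
++/// # Example
++///
++/// A minimal sketch (added here for illustration, not upstream prose); it
++/// assumes the current directory is readable:
++///
++/// ```no_run
++/// use ignore::Walk;
++///
++/// // Each item is either a matching entry or an error (e.g., a permission
++/// // problem), so handle both arms rather than unwrapping.
++/// for result in Walk::new("./") {
++///     match result {
++///         Ok(entry) => println!("{}", entry.path().display()),
++///         Err(err) => eprintln!("ERROR: {}", err),
++///     }
++/// }
++/// ```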
++pub struct Walk {
++    its: vec::IntoIter<(PathBuf, Option<WalkEventIter>)>,
++    it: Option<WalkEventIter>,
++    ig_root: Ignore,
++    ig: Ignore,
++    max_filesize: Option<u64>,
++}
++
++impl Walk {
++    /// Creates a new recursive directory iterator for the file path given.
++    ///
++    /// Note that this uses default settings, which include respecting
++    /// `.gitignore` files. To configure the iterator, use `WalkBuilder`
++    /// instead.
++    pub fn new<P: AsRef<Path>>(path: P) -> Walk {
++        WalkBuilder::new(path).build()
++    }
++
++    fn skip_entry(&self, ent: &walkdir::DirEntry) -> bool {
++        if ent.depth() == 0 {
++            return false;
++        }
++
++        let is_dir = walkdir_entry_is_dir(ent);
++        let max_size = self.max_filesize;
++        let should_skip_path = skip_path(&self.ig, ent.path(), is_dir);
++        let should_skip_filesize = if !is_dir && max_size.is_some() {
++            skip_filesize(max_size.unwrap(), ent.path(), &ent.metadata().ok())
++        } else {
++            false
++        };
++
++        should_skip_path || should_skip_filesize
++    }
++}
++
++impl Iterator for Walk {
++    type Item = Result<DirEntry, Error>;
++
++    #[inline(always)]
++    fn next(&mut self) -> Option<Result<DirEntry, Error>> {
++        loop {
++            let ev = match self.it.as_mut().and_then(|it| it.next()) {
++                Some(ev) => ev,
++                None => {
++                    match self.its.next() {
++                        None => return None,
++                        Some((_, None)) => {
++                            return Some(Ok(DirEntry::new_stdin()));
++                        }
++                        Some((path, Some(it))) => {
++                            self.it = Some(it);
++                            if path_is_dir(&path) {
++                                let (ig, err) = self.ig_root.add_parents(path);
++                                self.ig = ig;
++                                if let Some(err) = err {
++                                    return Some(Err(err));
++                                }
++                            } else {
++                                self.ig = self.ig_root.clone();
++                            }
++                        }
++                    }
++                    continue;
++                }
++            };
++            match ev {
++                Err(err) => {
++                    return Some(Err(Error::from_walkdir(err)));
++                }
++                Ok(WalkEvent::Exit) => {
++                    self.ig = self.ig.parent().unwrap();
++                }
++                Ok(WalkEvent::Dir(ent)) => {
++                    if self.skip_entry(&ent) {
++                        self.it.as_mut().unwrap().it.skip_current_dir();
++                        // Still need to push this on the stack because
++                        // we'll get a WalkEvent::Exit event for this dir.
++                        // We don't care if it errors though.
++                        let (igtmp, _) = self.ig.add_child(ent.path());
++                        self.ig = igtmp;
++                        continue;
++                    }
++                    let (igtmp, err) = self.ig.add_child(ent.path());
++                    self.ig = igtmp;
++                    return Some(Ok(DirEntry::new_walkdir(ent, err)));
++                }
++                Ok(WalkEvent::File(ent)) => {
++                    if self.skip_entry(&ent) {
++                        continue;
++                    }
++                    return Some(Ok(DirEntry::new_walkdir(ent, None)));
++                }
++            }
++        }
++    }
++}
++
++/// WalkEventIter transforms a WalkDir iterator into an iterator that more
++/// accurately describes the directory tree. Namely, it emits events that are
++/// one of three types: directory, file or "exit." An "exit" event means that
++/// the entire contents of a directory have been enumerated.
++struct WalkEventIter {
++    depth: usize,
++    it: walkdir::IntoIter,
++    next: Option<walkdir::Result<walkdir::DirEntry>>,
++}
++
++#[derive(Debug)]
++enum WalkEvent {
++    Dir(walkdir::DirEntry),
++    File(walkdir::DirEntry),
++    Exit,
++}
++
++impl From<WalkDir> for WalkEventIter {
++    fn from(it: WalkDir) -> WalkEventIter {
++        WalkEventIter { depth: 0, it: it.into_iter(), next: None }
++    }
++}
++
++impl Iterator for WalkEventIter {
++    type Item = walkdir::Result<WalkEvent>;
++
++    #[inline(always)]
++    fn next(&mut self) -> Option<walkdir::Result<WalkEvent>> {
++        let dent = self.next.take().or_else(|| self.it.next());
++        let depth = match dent {
++            None => 0,
++            Some(Ok(ref dent)) => dent.depth(),
++            Some(Err(ref err)) => err.depth(),
++        };
++        if depth < self.depth {
++            self.depth -= 1;
++            self.next = dent;
++            return Some(Ok(WalkEvent::Exit));
++        }
++        self.depth = depth;
++        match dent {
++            None => None,
++            Some(Err(err)) => Some(Err(err)),
++            Some(Ok(dent)) => {
++                if walkdir_entry_is_dir(&dent) {
++                    self.depth += 1;
++                    Some(Ok(WalkEvent::Dir(dent)))
++                } else {
++                    Some(Ok(WalkEvent::File(dent)))
++                }
++            }
++        }
++    }
++}
++
++/// WalkState is used in the parallel recursive directory iterator to indicate
++/// whether walking should continue as normal, skip descending into a
++/// particular directory or quit the walk entirely.
++#[derive(Clone, Copy, Debug, Eq, PartialEq)]
++pub enum WalkState {
++    /// Continue walking as normal.
++    Continue,
++    /// If the directory entry given is a directory, don't descend into it.
++    /// In all other cases, this has no effect.
++    Skip,
++    /// Quit the entire iterator as soon as possible.
++    ///
++    /// Note that this is an inherently asynchronous action. It is possible
++    /// for more entries to be yielded even after instructing the iterator
++    /// to quit.
++    Quit,
++}
++
++impl WalkState {
++    fn is_quit(&self) -> bool {
++        *self == WalkState::Quit
++    }
++}
++
++/// WalkParallel is a parallel recursive directory iterator over file paths
++/// in one or more directories.
++///
++/// Only file and directory paths matching the rules are returned. By default,
++/// ignore files like `.gitignore` are respected. The precise matching rules
++/// and precedence is explained in the documentation for `WalkBuilder`.
++///
++/// Unlike `Walk`, this uses multiple threads for traversing a directory.
++pub struct WalkParallel {
++    paths: vec::IntoIter<PathBuf>,
++    ig_root: Ignore,
++    max_filesize: Option<u64>,
++    max_depth: Option<usize>,
++    follow_links: bool,
++    threads: usize,
++}
++
++impl WalkParallel {
++    /// Execute the parallel recursive directory iterator. `mkf` is called
++    /// for each thread used for iteration. The function produced by `mkf`
++    /// is then in turn called for each visited file path.
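++    ///
++    /// # Example
++    ///
++    /// A brief sketch (added for illustration, not part of the upstream
++    /// docs) showing the per-thread callback shape and a `WalkState` in use:
++    ///
++    /// ```no_run
++    /// use ignore::{WalkBuilder, WalkState};
++    ///
++    /// WalkBuilder::new("./").build_parallel().run(|| {
++    ///     // This outer closure runs once per worker thread; the boxed
++    ///     // closure it returns is called for every visited entry.
++    ///     Box::new(|result| {
++    ///         if let Ok(entry) = result {
++    ///             println!("{}", entry.path().display());
++    ///         }
++    ///         WalkState::Continue
++    ///     })
++    /// });
++    /// ```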
++    pub fn run<F>(
++        self,
++        mut mkf: F,
++    ) where F: FnMut() -> Box<FnMut(Result<DirEntry, Error>) -> WalkState + Send + 'static> {
++        let mut f = mkf();
++        let threads = self.threads();
++        let queue = Arc::new(MsQueue::new());
++        let mut any_work = false;
++        // Send the initial set of root paths to the pool of workers.
++        // Note that we only send directories. For files, we call the
++        // callback directly.
++        for path in self.paths {
++            let dent =
++                if path == Path::new("-") {
++                    DirEntry::new_stdin()
++                } else {
++                    match DirEntryRaw::from_link(0, path) {
++                        Ok(dent) => DirEntry::new_raw(dent, None),
++                        Err(err) => {
++                            if f(Err(err)).is_quit() {
++                                return;
++                            }
++                            continue;
++                        }
++                    }
++                };
++            queue.push(Message::Work(Work {
++                dent: dent,
++                ignore: self.ig_root.clone(),
++            }));
++            any_work = true;
++        }
++        // ... but there's no need to start workers if we don't need them.
++        if !any_work {
++            return;
++        }
++        // Create the workers and then wait for them to finish.
++        let num_waiting = Arc::new(AtomicUsize::new(0));
++        let num_quitting = Arc::new(AtomicUsize::new(0));
++        let quit_now = Arc::new(AtomicBool::new(false));
++        let mut handles = vec![];
++        for _ in 0..threads {
++            let worker = Worker {
++                f: mkf(),
++                queue: queue.clone(),
++                quit_now: quit_now.clone(),
++                is_waiting: false,
++                is_quitting: false,
++                num_waiting: num_waiting.clone(),
++                num_quitting: num_quitting.clone(),
++                threads: threads,
++                max_depth: self.max_depth,
++                max_filesize: self.max_filesize,
++                follow_links: self.follow_links,
++            };
++            handles.push(thread::spawn(|| worker.run()));
++        }
++        for handle in handles {
++            handle.join().unwrap();
++        }
++    }
++
++    fn threads(&self) -> usize {
++        if self.threads == 0 {
++            2
++        } else {
++            self.threads
++        }
++    }
++}
++
++/// Message is the set of instructions that a worker knows how to process.
++enum Message {
++    /// A work item corresponds to a directory that should be descended into.
++    /// Work items for entries that should be skipped or ignored should not
++    /// be produced.
++    Work(Work),
++    /// This instruction indicates that the worker should start quitting.
++    Quit,
++}
++
++/// A unit of work for each worker to process.
++///
++/// Each unit of work corresponds to a directory that should be descended
++/// into.
++struct Work {
++    /// The directory entry.
++    dent: DirEntry,
++    /// Any ignore matchers that have been built for this directory's parents.
++    ignore: Ignore,
++}
++
++impl Work {
++    /// Returns true if and only if this work item is a directory.
++    fn is_dir(&self) -> bool {
++        self.dent.is_dir()
++    }
++
++    /// Returns true if and only if this work item is a symlink.
++    fn is_symlink(&self) -> bool {
++        self.dent.file_type().map_or(false, |ft| ft.is_symlink())
++    }
++
++    /// Adds ignore rules for parent directories.
++    ///
++    /// Note that this only applies to entries at depth 0. On all other
++    /// entries, this is a no-op.
++    fn add_parents(&mut self) -> Option<Error> {
++        if self.dent.depth() > 0 {
++            return None;
++        }
++        // At depth 0, the path of this entry is a root path, so we can
++        // use it directly to add parent ignore rules.
++        let (ig, err) = self.ignore.add_parents(self.dent.path());
++        self.ignore = ig;
++        err
++    }
++
++    /// Reads the directory contents of this work item and adds ignore
++    /// rules for this directory.
++    ///
++    /// If there was a problem with reading the directory contents, then
++    /// an error is returned. If there was a problem reading the ignore
++    /// rules for this directory, then the error is attached to this
++    /// work item's directory entry.
++    fn read_dir(&mut self) -> Result<fs::ReadDir, Error> {
++        let readdir = match fs::read_dir(self.dent.path()) {
++            Ok(readdir) => readdir,
++            Err(err) => {
++                let err = Error::from(err)
++                    .with_path(self.dent.path())
++                    .with_depth(self.dent.depth());
++                return Err(err);
++            }
++        };
++        let (ig, err) = self.ignore.add_child(self.dent.path());
++        self.ignore = ig;
++        self.dent.err = err;
++        Ok(readdir)
++    }
++}
++
++/// A worker is responsible for descending into directories, updating the
++/// ignore matchers, producing new work and invoking the caller's callback.
++///
++/// Note that a worker is *both* a producer and a consumer.
++struct Worker {
++    /// The caller's callback.
++    f: Box<FnMut(Result<DirEntry, Error>) -> WalkState + Send + 'static>,
++    /// A queue of work items. This is multi-producer and multi-consumer.
++    queue: Arc<MsQueue<Message>>,
++    /// Whether all workers should quit at the next opportunity. Note that
++    /// this is distinct from quitting because of exhausting the contents of
++    /// a directory. Instead, this is used when the caller's callback indicates
++    /// that the iterator should quit immediately.
++    quit_now: Arc<AtomicBool>,
++    /// Whether this worker is waiting for more work.
++    is_waiting: bool,
++    /// Whether this worker has started to quit.
++    is_quitting: bool,
++    /// The number of workers waiting for more work.
++    num_waiting: Arc<AtomicUsize>,
++    /// The number of workers waiting to quit.
++    num_quitting: Arc<AtomicUsize>,
++    /// The total number of workers.
++    threads: usize,
++    /// The maximum depth of directories to descend. A value of `0` means no
++    /// descent at all.
++    max_depth: Option<usize>,
++    /// The maximum size a searched file can be (in bytes). If a file exceeds
++    /// this size it will be skipped.
++    max_filesize: Option<u64>,
++    /// Whether to follow symbolic links or not. When this is enabled, loop
++    /// detection is performed.
++    follow_links: bool,
++}
++
++impl Worker {
++    /// Runs this worker until there is no more work left to do.
++    ///
++    /// The worker will call the caller's callback for all entries that aren't
++    /// skipped by the ignore matcher.
++    fn run(mut self) {
++        while let Some(mut work) = self.get_work() {
++            // If the work is not a directory, then we can just execute the
++            // caller's callback immediately and move on.
++            if work.is_symlink() || !work.is_dir() {
++                if (self.f)(Ok(work.dent)).is_quit() {
++                    self.quit_now();
++                    return;
++                }
++                continue;
++            }
++            if let Some(err) = work.add_parents() {
++                if (self.f)(Err(err)).is_quit() {
++                    self.quit_now();
++                    return;
++                }
++            }
++            let readdir = match work.read_dir() {
++                Ok(readdir) => readdir,
++                Err(err) => {
++                    if (self.f)(Err(err)).is_quit() {
++                        self.quit_now();
++                        return;
++                    }
++                    continue;
++                }
++            };
++            let depth = work.dent.depth();
++            match (self.f)(Ok(work.dent)) {
++                WalkState::Continue => {}
++                WalkState::Skip => continue,
++                WalkState::Quit => {
++                    self.quit_now();
++                    return;
++                }
++            }
++            if self.max_depth.map_or(false, |max| depth >= max) {
++                continue;
++            }
++            for result in readdir {
++                if self.run_one(&work.ignore, depth + 1, result).is_quit() {
++                    self.quit_now();
++                    return;
++                }
++            }
++        }
++    }
++
++    /// Runs the worker on a single entry from a directory iterator.
++    ///
++    /// If the entry is a path that should be ignored, then this is a no-op.
++    /// Otherwise, the entry is pushed on to the queue. (The actual execution
++    /// of the callback happens in `run`.)
++    ///
++    /// If an error occurs while reading the entry, then it is sent to the
++    /// caller's callback.
++    ///
++    /// `ig` is the `Ignore` matcher for the parent directory. `depth` should
++    /// be the depth of this entry. `result` should be the item yielded by
++    /// a directory iterator.
++    fn run_one(
++        &mut self,
++        ig: &Ignore,
++        depth: usize,
++        result: Result<fs::DirEntry, io::Error>,
++    ) -> WalkState {
++        let fs_dent = match result {
++            Ok(fs_dent) => fs_dent,
++            Err(err) => {
++                return (self.f)(Err(Error::from(err).with_depth(depth)));
++            }
++        };
++        let mut dent = match DirEntryRaw::from_entry(depth, &fs_dent) {
++            Ok(dent) => DirEntry::new_raw(dent, None),
++            Err(err) => {
++                return (self.f)(Err(err));
++            }
++        };
++        let is_symlink = dent.file_type().map_or(false, |ft| ft.is_symlink());
++        if self.follow_links && is_symlink {
++            let path = dent.path().to_path_buf();
++            dent = match DirEntryRaw::from_link(depth, path) {
++                Ok(dent) => DirEntry::new_raw(dent, None),
++                Err(err) => {
++                    return (self.f)(Err(err));
++                }
++            };
++            if dent.is_dir() {
++                if let Err(err) = check_symlink_loop(ig, dent.path(), depth) {
++                    return (self.f)(Err(err));
++                }
++            }
++        }
++        let is_dir = dent.is_dir();
++        let max_size = self.max_filesize;
++        let should_skip_path = skip_path(ig, dent.path(), is_dir);
++        let should_skip_filesize = if !is_dir && max_size.is_some() {
++            skip_filesize(max_size.unwrap(), dent.path(), &dent.metadata().ok())
++        } else {
++            false
++        };
++
++        if !should_skip_path && !should_skip_filesize {
++            self.queue.push(Message::Work(Work {
++                dent: dent,
++                ignore: ig.clone(),
++            }));
++        }
++        WalkState::Continue
++    }
++
++    /// Returns the next directory to descend into.
++    ///
++    /// If all work has been exhausted, then this returns None. The worker
++    /// should then subsequently quit.
++    fn get_work(&mut self) -> Option<Work> {
++        loop {
++            if self.is_quit_now() {
++                return None;
++            }
++            match self.queue.try_pop() {
++                Some(Message::Work(work)) => {
++                    self.waiting(false);
++                    self.quitting(false);
++                    return Some(work);
++                }
++                Some(Message::Quit) => {
++                    // We can't just quit because a Message::Quit could be
++                    // spurious. For example, it's possible to observe that
++                    // all workers are waiting even if there's more work to
++                    // be done.
++                    //
++                    // Therefore, we do a bit of a dance to wait until all
++                    // workers have signaled that they're ready to quit before
++                    // actually quitting.
++                    //
++                    // If the Quit message turns out to be spurious, then the
++                    // loop below will break and we'll go back to looking for
++                    // more work.
++                    self.waiting(true);
++                    self.quitting(true);
++                    while !self.is_quit_now() {
++                        let nwait = self.num_waiting();
++                        let nquit = self.num_quitting();
++                        // If the number of waiting workers dropped, then
++                        // abort our attempt to quit.
++                        if nwait < self.threads {
++                            break;
++                        }
++                        // If all workers are in this quit loop, then we
++                        // can stop.
++                        if nquit == self.threads {
++                            return None;
++                        }
++                        // Otherwise, spin.
++                    }
++                }
++                None => {
++                    self.waiting(true);
++                    self.quitting(false);
++                    if self.num_waiting() == self.threads {
++                        for _ in 0..self.threads {
++                            self.queue.push(Message::Quit);
++                        }
++                    } else {
++                        // You're right to consider this suspicious, but it's
++                        // a useful heuristic to permit producers to catch up
++                        // to consumers without burning the CPU. It is also
++                        // useful as a means to prevent burning the CPU if only
++                        // one worker is left doing actual work. It's not
++                        // perfect and it doesn't leave the CPU completely
++                        // idle, but it's not clear what else we can do. :-/
++                        thread::sleep(Duration::from_millis(1));
++                    }
++                }
++            }
++        }
++    }
++
++    /// Indicates that all workers should quit immediately.
++    fn quit_now(&self) {
++        self.quit_now.store(true, Ordering::SeqCst);
++    }
++
++    /// Returns true if this worker should quit immediately.
++    fn is_quit_now(&self) -> bool {
++        self.quit_now.load(Ordering::SeqCst)
++    }
++
++    /// Returns the total number of workers waiting for work.
++    fn num_waiting(&self) -> usize {
++        self.num_waiting.load(Ordering::SeqCst)
++    }
++
++    /// Returns the total number of workers ready to quit.
++    fn num_quitting(&self) -> usize {
++        self.num_quitting.load(Ordering::SeqCst)
++    }
++
++    /// Sets this worker's "quitting" state to the value of `yes`.
++    fn quitting(&mut self, yes: bool) {
++        if yes {
++            if !self.is_quitting {
++                self.is_quitting = true;
++                self.num_quitting.fetch_add(1, Ordering::SeqCst);
++            }
++        } else {
++            if self.is_quitting {
++                self.is_quitting = false;
++                self.num_quitting.fetch_sub(1, Ordering::SeqCst);
++            }
++        }
++    }
++
++    /// Sets this worker's "waiting" state to the value of `yes`.
++    fn waiting(&mut self, yes: bool) {
++        if yes {
++            if !self.is_waiting {
++                self.is_waiting = true;
++                self.num_waiting.fetch_add(1, Ordering::SeqCst);
++            }
++        } else {
++            if self.is_waiting {
++                self.is_waiting = false;
++                self.num_waiting.fetch_sub(1, Ordering::SeqCst);
++            }
++        }
++    }
++}
++
++fn check_symlink_loop(
++    ig_parent: &Ignore,
++    child_path: &Path,
++    child_depth: usize,
++) -> Result<(), Error> {
++    let hchild = Handle::from_path(child_path).map_err(|err| {
++        Error::from(err).with_path(child_path).with_depth(child_depth)
++    })?;
++    for ig in ig_parent.parents().take_while(|ig| !ig.is_absolute_parent()) {
++        let h = Handle::from_path(ig.path()).map_err(|err| {
++            Error::from(err).with_path(child_path).with_depth(child_depth)
++        })?;
++        if hchild == h {
++            return Err(Error::Loop {
++                ancestor: ig.path().to_path_buf(),
++                child: child_path.to_path_buf(),
++            }.with_depth(child_depth));
++        }
++    }
++    Ok(())
++}
++
++// Before calling this function, make sure that it is really necessary, as
++// the arguments imply a file stat.
++fn skip_filesize(
++    max_filesize: u64,
++    path: &Path,
++    ent: &Option<Metadata>
++) -> bool {
++    let filesize = match *ent {
++        Some(ref md) => Some(md.len()),
++        None => None
++    };
++
++    if let Some(fs) = filesize {
++        if fs > max_filesize {
++            debug!("ignoring {}: {} bytes", path.display(), fs);
++            true
++        } else {
++            false
++        }
++    } else {
++        false
++    }
++}
++
++fn skip_path(ig: &Ignore, path: &Path, is_dir: bool) -> bool {
++    let m = ig.matched(path, is_dir);
++    if m.is_ignore() {
++        debug!("ignoring {}: {:?}", path.display(), m);
++        true
++    } else if m.is_whitelist() {
++        debug!("whitelisting {}: {:?}", path.display(), m);
++        false
++    } else {
++        false
++    }
++}
++
++/// Returns true if and only if this path points to a directory.
++///
++/// This works around a bug in Rust's standard library:
++/// https://github.com/rust-lang/rust/issues/46484
++#[cfg(windows)]
++fn path_is_dir(path: &Path) -> bool {
++    fs::metadata(path).map(|md| metadata_is_dir(&md)).unwrap_or(false)
++}
++
++/// Returns true if and only if this path points to a directory.
++#[cfg(not(windows))]
++fn path_is_dir(path: &Path) -> bool {
++    path.is_dir()
++}
++
++/// Returns true if and only if this path points to a file.
++///
++/// This works around a bug in Rust's standard library:
++/// https://github.com/rust-lang/rust/issues/46484
++#[cfg(windows)]
++fn path_is_file(path: &Path) -> bool {
++    !path_is_dir(path)
++}
++
++/// Returns true if and only if this path points to a file.
++#[cfg(not(windows))]
++fn path_is_file(path: &Path) -> bool {
++    path.is_file()
++}
++
++/// Returns true if and only if the given walkdir entry points to a directory.
++///
++/// This works around a bug in Rust's standard library:
++/// https://github.com/rust-lang/rust/issues/46484
++#[cfg(windows)]
++fn walkdir_entry_is_dir(dent: &walkdir::DirEntry) -> bool {
++    dent.metadata().map(|md| metadata_is_dir(&md)).unwrap_or(false)
++}
++
++/// Returns true if and only if the given walkdir entry points to a directory.
++#[cfg(not(windows))]
++fn walkdir_entry_is_dir(dent: &walkdir::DirEntry) -> bool {
++    dent.file_type().is_dir()
++}
++
++/// Returns true if and only if the given metadata points to a directory.
++///
++/// This works around a bug in Rust's standard library:
++/// https://github.com/rust-lang/rust/issues/46484
++#[cfg(windows)]
++fn metadata_is_dir(md: &fs::Metadata) -> bool {
++    use std::os::windows::fs::MetadataExt;
++    use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
++    md.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0
++}
++
++#[cfg(test)]
++mod tests {
++    use std::fs::{self, File};
++    use std::io::Write;
++    use std::path::Path;
++    use std::sync::{Arc, Mutex};
++
++    use tempdir::TempDir;
++
++    use super::{WalkBuilder, WalkState};
++
++    fn wfile<P: AsRef<Path>>(path: P, contents: &str) {
++        let mut file = File::create(path).unwrap();
++        file.write_all(contents.as_bytes()).unwrap();
++    }
++
++    fn wfile_size<P: AsRef<Path>>(path: P, size: u64) {
++        let file = File::create(path).unwrap();
++        file.set_len(size).unwrap();
++    }
++
++    #[cfg(unix)]
++    fn symlink<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) {
++        use std::os::unix::fs::symlink;
++        symlink(src, dst).unwrap();
++    }
++
++    fn mkdirp<P: AsRef<Path>>(path: P) {
++        fs::create_dir_all(path).unwrap();
++    }
++
++    fn normal_path(unix: &str) -> String {
++        if cfg!(windows) {
++            unix.replace("\\", "/")
++        } else {
++            unix.to_string()
++        }
++    }
++
++    fn walk_collect(prefix: &Path, builder: &WalkBuilder) -> Vec<String> {
++        let mut paths = vec![];
++        for result in builder.build() {
++            let dent = match result {
++                Err(_) => continue,
++                Ok(dent) => dent,
++            };
++            let path = dent.path().strip_prefix(prefix).unwrap();
++            if path.as_os_str().is_empty() {
++                continue;
++            }
++            paths.push(normal_path(path.to_str().unwrap()));
++        }
++        paths.sort();
++        paths
++    }
++
++    fn walk_collect_parallel(
++        prefix: &Path,
++        builder: &WalkBuilder,
++    ) -> Vec<String> {
++        let paths = Arc::new(Mutex::new(vec![]));
++        let prefix = Arc::new(prefix.to_path_buf());
++        builder.build_parallel().run(|| {
++            let paths = paths.clone();
++            let prefix = prefix.clone();
++            Box::new(move |result| {
++                let dent = match result {
++                    Err(_) => return WalkState::Continue,
++                    Ok(dent) => dent,
++                };
++                let path = dent.path().strip_prefix(&**prefix).unwrap();
++                if path.as_os_str().is_empty() {
++                    return WalkState::Continue;
++                }
++                let mut paths = paths.lock().unwrap();
++                paths.push(normal_path(path.to_str().unwrap()));
++                WalkState::Continue
++            })
++        });
++        let mut paths = paths.lock().unwrap();
++        paths.sort();
++        paths.to_vec()
++    }
++
++    fn mkpaths(paths: &[&str]) -> Vec<String> {
++        let mut paths: Vec<_> = paths.iter().map(|s| s.to_string()).collect();
++        paths.sort();
++        paths
++    }
++
++    fn assert_paths(
++        prefix: &Path,
++        builder: &WalkBuilder,
++        expected: &[&str],
++    ) {
++        let got = walk_collect(prefix, builder);
++        assert_eq!(got, mkpaths(expected));
++        let got = walk_collect_parallel(prefix, builder);
++        assert_eq!(got, mkpaths(expected));
++    }
++
++    #[test]
++    fn no_ignores() {
++        let td = TempDir::new("walk-test-").unwrap();
++        
mkdirp(td.path().join("a/b/c")); ++ mkdirp(td.path().join("x/y")); ++ wfile(td.path().join("a/b/foo"), ""); ++ wfile(td.path().join("x/y/foo"), ""); ++ ++ assert_paths(td.path(), &WalkBuilder::new(td.path()), &[ ++ "x", "x/y", "x/y/foo", "a", "a/b", "a/b/foo", "a/b/c", ++ ]); ++ } ++ ++ #[test] ++ fn custom_ignore() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ let custom_ignore = ".customignore"; ++ mkdirp(td.path().join("a")); ++ wfile(td.path().join(custom_ignore), "foo"); ++ wfile(td.path().join("foo"), ""); ++ wfile(td.path().join("a/foo"), ""); ++ wfile(td.path().join("bar"), ""); ++ wfile(td.path().join("a/bar"), ""); ++ ++ let mut builder = WalkBuilder::new(td.path()); ++ builder.add_custom_ignore_filename(&custom_ignore); ++ assert_paths(td.path(), &builder, &["bar", "a", "a/bar"]); ++ } ++ ++ #[test] ++ fn custom_ignore_exclusive_use() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ let custom_ignore = ".customignore"; ++ mkdirp(td.path().join("a")); ++ wfile(td.path().join(custom_ignore), "foo"); ++ wfile(td.path().join("foo"), ""); ++ wfile(td.path().join("a/foo"), ""); ++ wfile(td.path().join("bar"), ""); ++ wfile(td.path().join("a/bar"), ""); ++ ++ let mut builder = WalkBuilder::new(td.path()); ++ builder.ignore(false); ++ builder.git_ignore(false); ++ builder.git_global(false); ++ builder.git_exclude(false); ++ builder.add_custom_ignore_filename(&custom_ignore); ++ assert_paths(td.path(), &builder, &["bar", "a", "a/bar"]); ++ } ++ ++ #[test] ++ fn gitignore() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ mkdirp(td.path().join(".git")); ++ mkdirp(td.path().join("a")); ++ wfile(td.path().join(".gitignore"), "foo"); ++ wfile(td.path().join("foo"), ""); ++ wfile(td.path().join("a/foo"), ""); ++ wfile(td.path().join("bar"), ""); ++ wfile(td.path().join("a/bar"), ""); ++ ++ assert_paths(td.path(), &WalkBuilder::new(td.path()), &[ ++ "bar", "a", "a/bar", ++ ]); ++ } ++ ++ #[test] ++ fn explicit_ignore() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ let igpath = td.path().join(".not-an-ignore"); ++ mkdirp(td.path().join("a")); ++ wfile(&igpath, "foo"); ++ wfile(td.path().join("foo"), ""); ++ wfile(td.path().join("a/foo"), ""); ++ wfile(td.path().join("bar"), ""); ++ wfile(td.path().join("a/bar"), ""); ++ ++ let mut builder = WalkBuilder::new(td.path()); ++ assert!(builder.add_ignore(&igpath).is_none()); ++ assert_paths(td.path(), &builder, &["bar", "a", "a/bar"]); ++ } ++ ++ #[test] ++ fn explicit_ignore_exclusive_use() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ let igpath = td.path().join(".not-an-ignore"); ++ mkdirp(td.path().join("a")); ++ wfile(&igpath, "foo"); ++ wfile(td.path().join("foo"), ""); ++ wfile(td.path().join("a/foo"), ""); ++ wfile(td.path().join("bar"), ""); ++ wfile(td.path().join("a/bar"), ""); ++ ++ let mut builder = WalkBuilder::new(td.path()); ++ builder.standard_filters(false); ++ assert!(builder.add_ignore(&igpath).is_none()); ++ assert_paths(td.path(), &builder, ++ &[".not-an-ignore", "bar", "a", "a/bar"]); ++ } ++ ++ #[test] ++ fn gitignore_parent() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ mkdirp(td.path().join(".git")); ++ mkdirp(td.path().join("a")); ++ wfile(td.path().join(".gitignore"), "foo"); ++ wfile(td.path().join("a/foo"), ""); ++ wfile(td.path().join("a/bar"), ""); ++ ++ let root = td.path().join("a"); ++ assert_paths(&root, &WalkBuilder::new(&root), &["bar"]); ++ } ++ ++ #[test] ++ fn max_depth() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ mkdirp(td.path().join("a/b/c")); ++ 
wfile(td.path().join("foo"), ""); ++ wfile(td.path().join("a/foo"), ""); ++ wfile(td.path().join("a/b/foo"), ""); ++ wfile(td.path().join("a/b/c/foo"), ""); ++ ++ let mut builder = WalkBuilder::new(td.path()); ++ assert_paths(td.path(), &builder, &[ ++ "a", "a/b", "a/b/c", "foo", "a/foo", "a/b/foo", "a/b/c/foo", ++ ]); ++ assert_paths(td.path(), builder.max_depth(Some(0)), &[]); ++ assert_paths(td.path(), builder.max_depth(Some(1)), &["a", "foo"]); ++ assert_paths(td.path(), builder.max_depth(Some(2)), &[ ++ "a", "a/b", "foo", "a/foo", ++ ]); ++ } ++ ++ #[test] ++ fn max_filesize() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ mkdirp(td.path().join("a/b")); ++ wfile_size(td.path().join("foo"), 0); ++ wfile_size(td.path().join("bar"), 400); ++ wfile_size(td.path().join("baz"), 600); ++ wfile_size(td.path().join("a/foo"), 600); ++ wfile_size(td.path().join("a/bar"), 500); ++ wfile_size(td.path().join("a/baz"), 200); ++ ++ let mut builder = WalkBuilder::new(td.path()); ++ assert_paths(td.path(), &builder, &[ ++ "a", "a/b", "foo", "bar", "baz", "a/foo", "a/bar", "a/baz", ++ ]); ++ assert_paths(td.path(), builder.max_filesize(Some(0)), &[ ++ "a", "a/b", "foo" ++ ]); ++ assert_paths(td.path(), builder.max_filesize(Some(500)), &[ ++ "a", "a/b", "foo", "bar", "a/bar", "a/baz" ++ ]); ++ assert_paths(td.path(), builder.max_filesize(Some(50000)), &[ ++ "a", "a/b", "foo", "bar", "baz", "a/foo", "a/bar", "a/baz", ++ ]); ++ } ++ ++ #[cfg(unix)] // because symlinks on windows are weird ++ #[test] ++ fn symlinks() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ mkdirp(td.path().join("a/b")); ++ symlink(td.path().join("a/b"), td.path().join("z")); ++ wfile(td.path().join("a/b/foo"), ""); ++ ++ let mut builder = WalkBuilder::new(td.path()); ++ assert_paths(td.path(), &builder, &[ ++ "a", "a/b", "a/b/foo", "z", ++ ]); ++ assert_paths(td.path(), &builder.follow_links(true), &[ ++ "a", "a/b", "a/b/foo", "z", "z/foo", ++ ]); ++ } ++ ++ #[cfg(unix)] // because symlinks on windows are weird ++ #[test] ++ fn symlink_loop() { ++ let td = TempDir::new("walk-test-").unwrap(); ++ mkdirp(td.path().join("a/b")); ++ symlink(td.path().join("a"), td.path().join("a/b/c")); ++ ++ let mut builder = WalkBuilder::new(td.path()); ++ assert_paths(td.path(), &builder, &[ ++ "a", "a/b", "a/b/c", ++ ]); ++ assert_paths(td.path(), &builder.follow_links(true), &[ ++ "a", "a/b", ++ ]); ++ } ++} diff --cc vendor/ignore-0.4.3/tests/gitignore_matched_path_or_any_parents_tests.gitignore index 000000000,000000000..ac09e12f7 new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/tests/gitignore_matched_path_or_any_parents_tests.gitignore @@@ -1,0 -1,0 +1,216 @@@ ++# Based on https://github.com/behnam/gitignore-test/blob/master/.gitignore ++ ++### file in root ++ ++# MATCH /file_root_1 ++file_root_00 ++ ++# NO_MATCH ++file_root_01/ ++ ++# NO_MATCH ++file_root_02/* ++ ++# NO_MATCH ++file_root_03/** ++ ++ ++# MATCH /file_root_10 ++/file_root_10 ++ ++# NO_MATCH ++/file_root_11/ ++ ++# NO_MATCH ++/file_root_12/* ++ ++# NO_MATCH ++/file_root_13/** ++ ++ ++# NO_MATCH ++*/file_root_20 ++ ++# NO_MATCH ++*/file_root_21/ ++ ++# NO_MATCH ++*/file_root_22/* ++ ++# NO_MATCH ++*/file_root_23/** ++ ++ ++# MATCH /file_root_30 ++**/file_root_30 ++ ++# NO_MATCH ++**/file_root_31/ ++ ++# NO_MATCH ++**/file_root_32/* ++ ++# NO_MATCH ++**/file_root_33/** ++ ++ ++### file in sub-dir ++ ++# MATCH /parent_dir/file_deep_1 ++file_deep_00 ++ ++# NO_MATCH ++file_deep_01/ ++ ++# NO_MATCH ++file_deep_02/* ++ ++# NO_MATCH ++file_deep_03/** ++ ++ ++# 
NO_MATCH ++/file_deep_10 ++ ++# NO_MATCH ++/file_deep_11/ ++ ++# NO_MATCH ++/file_deep_12/* ++ ++# NO_MATCH ++/file_deep_13/** ++ ++ ++# MATCH /parent_dir/file_deep_20 ++*/file_deep_20 ++ ++# NO_MATCH ++*/file_deep_21/ ++ ++# NO_MATCH ++*/file_deep_22/* ++ ++# NO_MATCH ++*/file_deep_23/** ++ ++ ++# MATCH /parent_dir/file_deep_30 ++**/file_deep_30 ++ ++# NO_MATCH ++**/file_deep_31/ ++ ++# NO_MATCH ++**/file_deep_32/* ++ ++# NO_MATCH ++**/file_deep_33/** ++ ++ ++### dir in root ++ ++# MATCH /dir_root_00 ++dir_root_00 ++ ++# MATCH /dir_root_01 ++dir_root_01/ ++ ++# MATCH /dir_root_02 ++dir_root_02/* ++ ++# MATCH /dir_root_03 ++dir_root_03/** ++ ++ ++# MATCH /dir_root_10 ++/dir_root_10 ++ ++# MATCH /dir_root_11 ++/dir_root_11/ ++ ++# MATCH /dir_root_12 ++/dir_root_12/* ++ ++# MATCH /dir_root_13 ++/dir_root_13/** ++ ++ ++# NO_MATCH ++*/dir_root_20 ++ ++# NO_MATCH ++*/dir_root_21/ ++ ++# NO_MATCH ++*/dir_root_22/* ++ ++# NO_MATCH ++*/dir_root_23/** ++ ++ ++# MATCH /dir_root_30 ++**/dir_root_30 ++ ++# MATCH /dir_root_31 ++**/dir_root_31/ ++ ++# MATCH /dir_root_32 ++**/dir_root_32/* ++ ++# MATCH /dir_root_33 ++**/dir_root_33/** ++ ++ ++### dir in sub-dir ++ ++# MATCH /parent_dir/dir_deep_00 ++dir_deep_00 ++ ++# MATCH /parent_dir/dir_deep_01 ++dir_deep_01/ ++ ++# NO_MATCH ++dir_deep_02/* ++ ++# NO_MATCH ++dir_deep_03/** ++ ++ ++# NO_MATCH ++/dir_deep_10 ++ ++# NO_MATCH ++/dir_deep_11/ ++ ++# NO_MATCH ++/dir_deep_12/* ++ ++# NO_MATCH ++/dir_deep_13/** ++ ++ ++# MATCH /parent_dir/dir_deep_20 ++*/dir_deep_20 ++ ++# MATCH /parent_dir/dir_deep_21 ++*/dir_deep_21/ ++ ++# MATCH /parent_dir/dir_deep_22 ++*/dir_deep_22/* ++ ++# MATCH /parent_dir/dir_deep_23 ++*/dir_deep_23/** ++ ++ ++# MATCH /parent_dir/dir_deep_30 ++**/dir_deep_30 ++ ++# MATCH /parent_dir/dir_deep_31 ++**/dir_deep_31/ ++ ++# MATCH /parent_dir/dir_deep_32 ++**/dir_deep_32/* ++ ++# MATCH /parent_dir/dir_deep_33 ++**/dir_deep_33/** diff --cc vendor/ignore-0.4.3/tests/gitignore_matched_path_or_any_parents_tests.rs index 000000000,000000000..4de7cf3a0 new file mode 100644 --- /dev/null +++ b/vendor/ignore-0.4.3/tests/gitignore_matched_path_or_any_parents_tests.rs @@@ -1,0 -1,0 +1,297 @@@ ++extern crate ignore; ++ ++ ++use std::path::Path; ++ ++use ignore::gitignore::{Gitignore, GitignoreBuilder}; ++ ++ ++const IGNORE_FILE: &'static str = "tests/gitignore_matched_path_or_any_parents_tests.gitignore"; ++ ++ ++fn get_gitignore() -> Gitignore { ++ let mut builder = GitignoreBuilder::new("ROOT"); ++ let error = builder.add(IGNORE_FILE); ++ assert!(error.is_none(), "failed to open gitignore file"); ++ builder.build().unwrap() ++} ++ ++ ++#[test] ++#[should_panic(expected = "path is expect to be under the root")] ++fn test_path_should_be_under_root() { ++ let gitignore = get_gitignore(); ++ let path = "/tmp/some_file"; ++ gitignore.matched_path_or_any_parents(Path::new(path), false); ++ assert!(false); ++} ++ ++ ++#[test] ++fn test_files_in_root() { ++ let gitignore = get_gitignore(); ++ let m = |path: &str| gitignore.matched_path_or_any_parents(Path::new(path), false); ++ ++ // 0x ++ assert!(m("ROOT/file_root_00").is_ignore()); ++ assert!(m("ROOT/file_root_01").is_none()); ++ assert!(m("ROOT/file_root_02").is_none()); ++ assert!(m("ROOT/file_root_03").is_none()); ++ ++ // 1x ++ assert!(m("ROOT/file_root_10").is_ignore()); ++ assert!(m("ROOT/file_root_11").is_none()); ++ assert!(m("ROOT/file_root_12").is_none()); ++ assert!(m("ROOT/file_root_13").is_none()); ++ ++ // 2x ++ assert!(m("ROOT/file_root_20").is_none()); ++ 
assert!(m("ROOT/file_root_21").is_none()); ++ assert!(m("ROOT/file_root_22").is_none()); ++ assert!(m("ROOT/file_root_23").is_none()); ++ ++ // 3x ++ assert!(m("ROOT/file_root_30").is_ignore()); ++ assert!(m("ROOT/file_root_31").is_none()); ++ assert!(m("ROOT/file_root_32").is_none()); ++ assert!(m("ROOT/file_root_33").is_none()); ++} ++ ++ ++#[test] ++fn test_files_in_deep() { ++ let gitignore = get_gitignore(); ++ let m = |path: &str| gitignore.matched_path_or_any_parents(Path::new(path), false); ++ ++ // 0x ++ assert!(m("ROOT/parent_dir/file_deep_00").is_ignore()); ++ assert!(m("ROOT/parent_dir/file_deep_01").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_02").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_03").is_none()); ++ ++ // 1x ++ assert!(m("ROOT/parent_dir/file_deep_10").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_11").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_12").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_13").is_none()); ++ ++ // 2x ++ assert!(m("ROOT/parent_dir/file_deep_20").is_ignore()); ++ assert!(m("ROOT/parent_dir/file_deep_21").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_22").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_23").is_none()); ++ ++ // 3x ++ assert!(m("ROOT/parent_dir/file_deep_30").is_ignore()); ++ assert!(m("ROOT/parent_dir/file_deep_31").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_32").is_none()); ++ assert!(m("ROOT/parent_dir/file_deep_33").is_none()); ++} ++ ++ ++#[test] ++fn test_dirs_in_root() { ++ let gitignore = get_gitignore(); ++ let m = ++ |path: &str, is_dir: bool| gitignore.matched_path_or_any_parents(Path::new(path), is_dir); ++ ++ // 00 ++ assert!(m("ROOT/dir_root_00", true).is_ignore()); ++ assert!(m("ROOT/dir_root_00/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_00/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_00/child_dir/file", false).is_ignore()); ++ ++ // 01 ++ assert!(m("ROOT/dir_root_01", true).is_ignore()); ++ assert!(m("ROOT/dir_root_01/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_01/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_01/child_dir/file", false).is_ignore()); ++ ++ // 02 ++ assert!(m("ROOT/dir_root_02", true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/dir_root_02/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_02/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_02/child_dir/file", false).is_ignore()); ++ ++ // 03 ++ assert!(m("ROOT/dir_root_03", true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/dir_root_03/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_03/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_03/child_dir/file", false).is_ignore()); ++ ++ // 10 ++ assert!(m("ROOT/dir_root_10", true).is_ignore()); ++ assert!(m("ROOT/dir_root_10/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_10/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_10/child_dir/file", false).is_ignore()); ++ ++ // 11 ++ assert!(m("ROOT/dir_root_11", true).is_ignore()); ++ assert!(m("ROOT/dir_root_11/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_11/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_11/child_dir/file", false).is_ignore()); ++ ++ // 12 ++ assert!(m("ROOT/dir_root_12", true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/dir_root_12/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_12/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_12/child_dir/file", false).is_ignore()); ++ ++ // 13 ++ 
assert!(m("ROOT/dir_root_13", true).is_none()); ++ assert!(m("ROOT/dir_root_13/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_13/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_13/child_dir/file", false).is_ignore()); ++ ++ // 20 ++ assert!(m("ROOT/dir_root_20", true).is_none()); ++ assert!(m("ROOT/dir_root_20/file", false).is_none()); ++ assert!(m("ROOT/dir_root_20/child_dir", true).is_none()); ++ assert!(m("ROOT/dir_root_20/child_dir/file", false).is_none()); ++ ++ // 21 ++ assert!(m("ROOT/dir_root_21", true).is_none()); ++ assert!(m("ROOT/dir_root_21/file", false).is_none()); ++ assert!(m("ROOT/dir_root_21/child_dir", true).is_none()); ++ assert!(m("ROOT/dir_root_21/child_dir/file", false).is_none()); ++ ++ // 22 ++ assert!(m("ROOT/dir_root_22", true).is_none()); ++ assert!(m("ROOT/dir_root_22/file", false).is_none()); ++ assert!(m("ROOT/dir_root_22/child_dir", true).is_none()); ++ assert!(m("ROOT/dir_root_22/child_dir/file", false).is_none()); ++ ++ // 23 ++ assert!(m("ROOT/dir_root_23", true).is_none()); ++ assert!(m("ROOT/dir_root_23/file", false).is_none()); ++ assert!(m("ROOT/dir_root_23/child_dir", true).is_none()); ++ assert!(m("ROOT/dir_root_23/child_dir/file", false).is_none()); ++ ++ // 30 ++ assert!(m("ROOT/dir_root_30", true).is_ignore()); ++ assert!(m("ROOT/dir_root_30/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_30/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_30/child_dir/file", false).is_ignore()); ++ ++ // 31 ++ assert!(m("ROOT/dir_root_31", true).is_ignore()); ++ assert!(m("ROOT/dir_root_31/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_31/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_31/child_dir/file", false).is_ignore()); ++ ++ // 32 ++ assert!(m("ROOT/dir_root_32", true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/dir_root_32/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_32/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_32/child_dir/file", false).is_ignore()); ++ ++ // 33 ++ assert!(m("ROOT/dir_root_33", true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/dir_root_33/file", false).is_ignore()); ++ assert!(m("ROOT/dir_root_33/child_dir", true).is_ignore()); ++ assert!(m("ROOT/dir_root_33/child_dir/file", false).is_ignore()); ++} ++ ++ ++#[test] ++fn test_dirs_in_deep() { ++ let gitignore = get_gitignore(); ++ let m = ++ |path: &str, is_dir: bool| gitignore.matched_path_or_any_parents(Path::new(path), is_dir); ++ ++ // 00 ++ assert!(m("ROOT/parent_dir/dir_deep_00", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_00/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_00/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_00/child_dir/file", false).is_ignore()); ++ ++ // 01 ++ assert!(m("ROOT/parent_dir/dir_deep_01", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_01/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_01/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_01/child_dir/file", false).is_ignore()); ++ ++ // 02 ++ assert!(m("ROOT/parent_dir/dir_deep_02", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_02/file", false).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_02/child_dir", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_02/child_dir/file", false).is_none()); ++ ++ // 03 ++ assert!(m("ROOT/parent_dir/dir_deep_03", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_03/file", false).is_none()); ++ 
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_03/child_dir/file", false).is_none()); ++ ++ // 10 ++ assert!(m("ROOT/parent_dir/dir_deep_10", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_10/file", false).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_10/child_dir", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_10/child_dir/file", false).is_none()); ++ ++ // 11 ++ assert!(m("ROOT/parent_dir/dir_deep_11", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_11/file", false).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_11/child_dir", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_11/child_dir/file", false).is_none()); ++ ++ // 12 ++ assert!(m("ROOT/parent_dir/dir_deep_12", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_12/file", false).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_12/child_dir", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_12/child_dir/file", false).is_none()); ++ ++ // 13 ++ assert!(m("ROOT/parent_dir/dir_deep_13", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_13/file", false).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_13/child_dir", true).is_none()); ++ assert!(m("ROOT/parent_dir/dir_deep_13/child_dir/file", false).is_none()); ++ ++ // 20 ++ assert!(m("ROOT/parent_dir/dir_deep_20", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_20/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_20/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_20/child_dir/file", false).is_ignore()); ++ ++ // 21 ++ assert!(m("ROOT/parent_dir/dir_deep_21", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_21/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_21/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_21/child_dir/file", false).is_ignore()); ++ ++ // 22 ++ assert!(m("ROOT/parent_dir/dir_deep_22", true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/parent_dir/dir_deep_22/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_22/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_22/child_dir/file", false).is_ignore()); ++ ++ // 23 ++ assert!(m("ROOT/parent_dir/dir_deep_23", true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/parent_dir/dir_deep_23/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_23/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_23/child_dir/file", false).is_ignore()); ++ ++ // 30 ++ assert!(m("ROOT/parent_dir/dir_deep_30", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_30/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_30/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_30/child_dir/file", false).is_ignore()); ++ ++ // 31 ++ assert!(m("ROOT/parent_dir/dir_deep_31", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_31/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_31/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_31/child_dir/file", false).is_ignore()); ++ ++ // 32 ++ assert!(m("ROOT/parent_dir/dir_deep_32", true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/parent_dir/dir_deep_32/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_32/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_32/child_dir/file", false).is_ignore()); ++ ++ // 33 ++ assert!(m("ROOT/parent_dir/dir_deep_33", 
true).is_none()); // dir itself doesn't match ++ assert!(m("ROOT/parent_dir/dir_deep_33/file", false).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_33/child_dir", true).is_ignore()); ++ assert!(m("ROOT/parent_dir/dir_deep_33/child_dir/file", false).is_ignore()); ++} diff --cc vendor/lazy_static-1.1.0/.cargo-checksum.json index 000000000,000000000..d2e4af05b new file mode 100644 --- /dev/null +++ b/vendor/lazy_static-1.1.0/.cargo-checksum.json @@@ -1,0 -1,0 +1,1 @@@ ++{"files":{},"package":"ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7"} diff --cc vendor/lazy_static-1.1.0/.travis.yml index 000000000,000000000..820eec336 new file mode 100644 --- /dev/null +++ b/vendor/lazy_static-1.1.0/.travis.yml @@@ -1,0 -1,0 +1,35 @@@ ++language: rust ++matrix: ++ include: ++ - rust: 1.21.0 ++ - rust: stable ++ - os: osx ++ - rust: beta ++ - rust: nightly ++ script: ++ - cargo test ++ - cargo bench ++ - cargo test --features nightly ++ - cargo bench --features nightly ++ - CARGO_CFG_LAZY_STATIC_HEAP_IMPL=1 cargo test ++ - CARGO_CFG_LAZY_STATIC_HEAP_IMPL=1 cargo bench ++ - cargo test --features spin_no_std ++ - cargo bench --features spin_no_std ++ - cd compiletest ++ - cargo clean ++ - cargo test ++ - cd ../ ++ ++ - rust: nightly ++ before_script: ++ - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH ++ script: ++ - cargo doc --no-deps --all-features ++ after_success: ++ - travis-cargo --only nightly doc-upload ++script: ++ - cargo test ++ ++env: ++ global: ++ - secure: YXu24LptjeYirjWYjWGsMT2m3mB7LvQATE6TVo7VEUXv8GYoy2ORIHD83PeImxC93MmZ01QeUezRzuCW51ZcK92VnNSBttlF60SvIX18VsJrV92tsAhievFstqYQ+fB8DIuQ8noU0jPz7GpI+R9dlTRSImAqWOnVIghA+Wzz7Js= diff --cc vendor/lazy_static-1.1.0/Cargo.toml index 000000000,000000000..dfa585a73 new file mode 100644 --- /dev/null +++ b/vendor/lazy_static-1.1.0/Cargo.toml @@@ -1,0 -1,0 +1,47 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "lazy_static" ++version = "1.1.0" ++authors = ["Marvin Löbel "] ++build = "build.rs" ++description = "A macro for declaring lazily evaluated statics in Rust." 
++documentation = "https://docs.rs/lazy_static" ++readme = "README.md" ++keywords = ["macro", "lazy", "static"] ++categories = ["no-std", "rust-patterns", "memory-management"] ++license = "MIT/Apache-2.0" ++repository = "https://github.com/rust-lang-nursery/lazy-static.rs" ++[dependencies.spin] ++version = "0.4.6" ++optional = true ++[build-dependencies.version_check] ++version = "0.1.4" ++ ++[features] ++nightly = [] ++spin_no_std = ["nightly", "spin"] ++[badges.appveyor] ++repository = "rust-lang-nursery/lazy-static.rs" ++ ++[badges.is-it-maintained-issue-resolution] ++repository = "rust-lang-nursery/lazy-static.rs" ++ ++[badges.is-it-maintained-open-issues] ++repository = "rust-lang-nursery/lazy-static.rs" ++ ++[badges.maintenance] ++status = "passively-maintained" ++ ++[badges.travis-ci] ++repository = "rust-lang-nursery/lazy-static.rs" diff --cc vendor/lazy_static-1.1.0/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/lazy_static-1.1.0/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. 
++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. 
You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. 
In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. diff --cc vendor/lazy_static-1.1.0/LICENSE-MIT index 000000000,000000000..25597d583 new file mode 100644 --- /dev/null +++ b/vendor/lazy_static-1.1.0/LICENSE-MIT @@@ -1,0 -1,0 +1,25 @@@ ++Copyright (c) 2010 The Rust Project Developers ++ ++Permission is hereby granted, free of charge, to any ++person obtaining a copy of this software and associated ++documentation files (the "Software"), to deal in the ++Software without restriction, including without ++limitation the rights to use, copy, modify, merge, ++publish, distribute, sublicense, and/or sell copies of ++the Software, and to permit persons to whom the Software ++is furnished to do so, subject to the following ++conditions: ++ ++The above copyright notice and this permission notice ++shall be included in all copies or substantial portions ++of the Software. 
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
diff --cc vendor/lazy_static-1.1.0/README.md
index 000000000,000000000..b91f73352
new file mode 100644
--- /dev/null
+++ b/vendor/lazy_static-1.1.0/README.md
@@@ -1,0 -1,0 +1,73 @@@
++lazy-static.rs
++==============
++
++A macro for declaring lazily evaluated statics in Rust.
++
++Using this macro, it is possible to have `static`s that require code to be
++executed at runtime in order to be initialized.
++This includes anything requiring heap allocations, like vectors or hash maps,
++as well as anything that requires non-const function calls to be computed.
++
++[![Travis-CI Status](https://travis-ci.org/rust-lang-nursery/lazy-static.rs.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/lazy-static.rs)
++[![Latest version](https://img.shields.io/crates/v/lazy_static.svg)](https://crates.io/crates/lazy_static)
++[![Documentation](https://docs.rs/lazy_static/badge.svg)](https://docs.rs/lazy_static)
++[![License](https://img.shields.io/crates/l/lazy_static.svg)](https://github.com/rust-lang-nursery/lazy-static.rs#license)
++
++
++# Getting Started
++
++[lazy-static.rs is available on crates.io](https://crates.io/crates/lazy_static).
++It is recommended to look there for the newest released version, as well as links to the newest builds of the docs.
++
++At the point of the last update of this README, the latest published version could be used like this:
++
++Add the following dependency to your Cargo manifest...
++
++```toml
++[dependencies]
++lazy_static = "1.1.0"
++```
++
++...and see the [docs](https://docs.rs/lazy_static) for how to use it.
++
++# Example
++
++```rust
++#[macro_use]
++extern crate lazy_static;
++
++use std::collections::HashMap;
++
++lazy_static! {
++    static ref HASHMAP: HashMap<u32, &'static str> = {
++        let mut m = HashMap::new();
++        m.insert(0, "foo");
++        m.insert(1, "bar");
++        m.insert(2, "baz");
++        m
++    };
++}
++
++fn main() {
++    // First access to `HASHMAP` initializes it
++    println!("The entry for `0` is \"{}\".", HASHMAP.get(&0).unwrap());
++
++    // Any further access to `HASHMAP` just returns the computed value
++    println!("The entry for `1` is \"{}\".", HASHMAP.get(&1).unwrap());
++}
++```
++
++## License
++
++Licensed under either of
++
++ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
++ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
++
++at your option.
++
++### Contribution
++
++Unless you explicitly state otherwise, any contribution intentionally submitted
++for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
++additional terms or conditions.
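The README example above covers read-only data. As a complementary sketch (an editorial illustration, not part of the vendored README; the `EVENTS` name is hypothetical), the same macro is commonly paired with a `Mutex` to get a lazily initialized *mutable* global, since `Mutex<T>` satisfies the `Sync` requirement that lazy statics impose:

```rust
#[macro_use]
extern crate lazy_static;

use std::sync::Mutex;

lazy_static! {
    // Mutex<Vec<String>> is Sync, so it may live in a lazy static.
    static ref EVENTS: Mutex<Vec<String>> = Mutex::new(Vec::new());
}

fn main() {
    // The Vec is allocated on first access, then mutated under the lock.
    EVENTS.lock().unwrap().push("started".to_string());
    assert_eq!(EVENTS.lock().unwrap().len(), 1);
}
```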
diff --cc vendor/lazy_static-1.1.0/appveyor.yml index 000000000,000000000..b13845248 new file mode 100644 --- /dev/null +++ b/vendor/lazy_static-1.1.0/appveyor.yml @@@ -1,0 -1,0 +1,61 @@@ ++environment: ++ global: ++ PROJECT_NAME: lazy_static ++ # When this was added there were revocation check failures when using the ++ # libcurl backend as libcurl checks by default, but rustup doesn't provide the ++ # switch to turn this off. Switch to Hyper which looks to not check for ++ # revocation by default like libcurl does. ++ RUSTUP_USE_REQWEST: 1 ++ CARGO_HTTP_CHECK_REVOKE: false ++ matrix: ++ # Stable channel ++ - TARGET: i686-pc-windows-gnu ++ CHANNEL: stable ++ - TARGET: i686-pc-windows-msvc ++ CHANNEL: stable ++ - TARGET: x86_64-pc-windows-gnu ++ CHANNEL: stable ++ - TARGET: x86_64-pc-windows-msvc ++ CHANNEL: stable ++ # Beta channel ++ - TARGET: i686-pc-windows-gnu ++ CHANNEL: beta ++ - TARGET: i686-pc-windows-msvc ++ CHANNEL: beta ++ - TARGET: x86_64-pc-windows-gnu ++ CHANNEL: beta ++ - TARGET: x86_64-pc-windows-msvc ++ CHANNEL: beta ++ # Nightly channel ++ - TARGET: i686-pc-windows-gnu ++ CHANNEL: nightly ++ - TARGET: i686-pc-windows-msvc ++ CHANNEL: nightly ++ - TARGET: x86_64-pc-windows-gnu ++ CHANNEL: nightly ++ - TARGET: x86_64-pc-windows-msvc ++ CHANNEL: nightly ++ ++# Install Rust and Cargo ++# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml) ++install: ++ - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe ++ - rustup-init.exe -y --default-toolchain %CHANNEL% --default-host %TARGET% ++ - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin ++ - if "%TARGET%" == "i686-pc-windows-gnu" set PATH=%PATH%;C:\msys64\mingw32\bin ++ - if "%TARGET%" == "x86_64-pc-windows-gnu" set PATH=%PATH%;C:\msys64\mingw64\bin ++ - rustc -V ++ - cargo -V ++ ++build: false ++ ++test_script: ++ - cargo build --verbose ++ - cargo test ++ - if [%CHANNEL%]==[nightly] ( ++ cd compiletest && ++ cargo clean && ++ cargo build --verbose && ++ cargo test && ++ cd ../ ++ ) diff --cc vendor/lazy_static-1.1.0/build.rs index 000000000,000000000..3d579f551 new file mode 100644 --- /dev/null +++ b/vendor/lazy_static-1.1.0/build.rs @@@ -1,0 -1,0 +1,44 @@@ ++extern crate version_check; ++ ++fn main() { ++ let is_var_set = |s| std::env::var_os(s).is_some(); ++ ++ // one can manually set a cfg to force an impl -- mostly useful for our own testing ++ let force_heap_cfg = is_var_set("CARGO_CFG_LAZY_STATIC_HEAP_IMPL"); ++ let force_inline_cfg = is_var_set("CARGO_CFG_LAZY_STATIC_INLINE_IMPL"); ++ let force_spin_cfg = is_var_set("CARGO_CFG_LAZY_STATIC_SPIN_IMPL"); ++ ++ let impls_forced = [force_heap_cfg, force_inline_cfg, force_spin_cfg] ++ .into_iter() ++ .filter(|&&f| f) ++ .count(); ++ ++ assert!( ++ impls_forced <= 1, ++ "lazy_static can only be built with one configuration at a time." ++ ); ++ ++ let nightly_feature_enabled = is_var_set("CARGO_FEATURE_NIGHTLY"); ++ let spin_feature_enabled = is_var_set("CARGO_FEATURE_SPIN_NO_STD"); ++ ++ let version_geq_122 = version_check::is_min_version("1.22.0").unwrap().0; ++ let drop_in_static_supported = version_geq_122 || nightly_feature_enabled; ++ ++ // precedence: ++ // 1. explicit requests via cfg or spin_no_std feature ++ // 2. inline impl with newer rustc version or nightly feature (latter for backcompat) ++ // 3. 
fallback to allocating implementation
++    let impl_name = if force_heap_cfg {
++        "heap"
++    } else if force_inline_cfg {
++        "inline"
++    } else if force_spin_cfg || spin_feature_enabled {
++        "spin"
++    } else if drop_in_static_supported {
++        "inline"
++    } else {
++        "heap"
++    };
++
++    println!("cargo:rustc-cfg=lazy_static_{}_impl", impl_name);
++}
diff --cc vendor/lazy_static-1.1.0/src/core_lazy.rs
index 000000000,000000000..ba496a68b
new file mode 100644
--- /dev/null
+++ b/vendor/lazy_static-1.1.0/src/core_lazy.rs
@@@ -1,0 -1,0 +1,34 @@@
++// Copyright 2016 lazy-static.rs Developers
++//
++// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
++// copied, modified, or distributed except according to those terms.
++
++extern crate spin;
++
++use self::spin::Once;
++
++pub struct Lazy<T: Sync>(Once<T>);
++
++impl<T: Sync> Lazy<T> {
++    #[inline(always)]
++    pub const fn new() -> Self {
++        Lazy(Once::new())
++    }
++
++    #[inline(always)]
++    pub fn get<F>(&'static self, builder: F) -> &T
++        where F: FnOnce() -> T
++    {
++        self.0.call_once(builder)
++    }
++}
++
++#[macro_export]
++#[doc(hidden)]
++macro_rules! __lazy_static_create {
++    ($NAME:ident, $T:ty) => {
++        static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::new();
++    }
++}
diff --cc vendor/lazy_static-1.1.0/src/heap_lazy.rs
index 000000000,000000000..c67a30189
new file mode 100644
--- /dev/null
+++ b/vendor/lazy_static-1.1.0/src/heap_lazy.rs
@@@ -1,0 -1,0 +1,43 @@@
++// Copyright 2016 lazy-static.rs Developers
++//
++// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
++// copied, modified, or distributed except according to those terms.
++
++extern crate std;
++
++use self::std::prelude::v1::*;
++use self::std::sync::Once;
++pub use self::std::sync::ONCE_INIT;
++
++pub struct Lazy<T: Sync>(*const T, Once);
++
++impl<T: Sync> Lazy<T> {
++    pub const INIT: Self = Lazy(0 as *const T, ONCE_INIT);
++
++    #[inline(always)]
++    pub fn get<F>(&'static mut self, f: F) -> &T
++    where
++        F: FnOnce() -> T,
++    {
++        unsafe {
++            let r = &mut self.0;
++            self.1.call_once(|| {
++                *r = Box::into_raw(Box::new(f()));
++            });
++
++            &*self.0
++        }
++    }
++}
++
++unsafe impl<T: Sync> Sync for Lazy<T> {}
++
++#[macro_export]
++#[doc(hidden)]
++macro_rules! __lazy_static_create {
++    ($NAME:ident, $T:ty) => {
++        static mut $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::INIT;
++    };
++}
diff --cc vendor/lazy_static-1.1.0/src/inline_lazy.rs
index 000000000,000000000..201a3c0b5
new file mode 100644
--- /dev/null
+++ b/vendor/lazy_static-1.1.0/src/inline_lazy.rs
@@@ -1,0 -1,0 +1,59 @@@
++// Copyright 2016 lazy-static.rs Developers
++//
++// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
++// copied, modified, or distributed except according to those terms.
++
++extern crate core;
++extern crate std;
++
++use self::std::prelude::v1::*;
++use self::std::sync::Once;
++pub use self::std::sync::ONCE_INIT;
++
++pub struct Lazy<T: Sync>(Option<T>, Once);
++
++impl<T: Sync> Lazy<T> {
++    pub const INIT: Self = Lazy(None, ONCE_INIT);
++
++    #[inline(always)]
++    pub fn get<F>(&'static mut self, f: F) -> &T
++    where
++        F: FnOnce() -> T,
++    {
++        {
++            let r = &mut self.0;
++            self.1.call_once(|| {
++                *r = Some(f());
++            });
++        }
++        unsafe {
++            match self.0 {
++                Some(ref x) => x,
++                None => unreachable_unchecked(),
++            }
++        }
++    }
++}
++
++unsafe impl<T: Sync> Sync for Lazy<T> {}
++
++#[macro_export]
++#[doc(hidden)]
++macro_rules! __lazy_static_create {
++    ($NAME:ident, $T:ty) => {
++        static mut $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::INIT;
++    };
++}
++
++/// Polyfill for std::hint::unreachable_unchecked. There currently exists a
++/// [crate](https://docs.rs/unreachable) for an equivalent to std::hint::unreachable_unchecked, but
++/// lazy_static currently doesn't include any runtime dependencies and we've chosen to include this
++/// short polyfill rather than include a new crate in every consumer's build.
++///
++/// This should be replaced by std's version when lazy_static starts to require at least Rust 1.27.
++unsafe fn unreachable_unchecked() -> ! {
++    enum Void {}
++    match std::mem::uninitialized::<Void>() {}
++}
diff --cc vendor/lazy_static-1.1.0/src/lib.rs
index 000000000,000000000..a01dd4bc0
new file mode 100644
--- /dev/null
+++ b/vendor/lazy_static-1.1.0/src/lib.rs
@@@ -1,0 -1,0 +1,223 @@@
++// Copyright 2016 lazy-static.rs Developers
++//
++// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
++// copied, modified, or distributed except according to those terms.
++
++/*!
++A macro for declaring lazily evaluated statics.
++
++Using this macro, it is possible to have `static`s that require code to be
++executed at runtime in order to be initialized.
++This includes anything requiring heap allocations, like vectors or hash maps,
++as well as anything that requires function calls to be computed.
++
++# Syntax
++
++```ignore
++lazy_static! {
++    [pub] static ref NAME_1: TYPE_1 = EXPR_1;
++    [pub] static ref NAME_2: TYPE_2 = EXPR_2;
++    ...
++    [pub] static ref NAME_N: TYPE_N = EXPR_N;
++}
++```
++
++Attributes (including doc comments) are supported as well:
++
++```rust
++# #[macro_use]
++# extern crate lazy_static;
++# fn main() {
++lazy_static! {
++    /// This is an example for using doc comment attributes
++    static ref EXAMPLE: u8 = 42;
++}
++# }
++```
++
++# Semantics
++
++For a given `static ref NAME: TYPE = EXPR;`, the macro generates a unique type that
++implements `Deref<TYPE>` and stores it in a static with name `NAME`. (Attributes end up
++attaching to this type.)
++
++On first deref, `EXPR` gets evaluated and stored internally, such that all further derefs
++can return a reference to the same object. Note that this can lead to deadlocks
++if you have multiple lazy statics that depend on each other in their initialization.
++
++Apart from the lazy initialization, the resulting "static ref" variables
++generally have the same properties as regular "static" variables:
++
++- Any type in them needs to fulfill the `Sync` trait.
++- If the type has a destructor, then it will not run when the process exits.
++
++# Example
++
++Using the macro:
++
++```rust
++#[macro_use]
++extern crate lazy_static;
++
++use std::collections::HashMap;
++
++lazy_static! {
++    static ref HASHMAP: HashMap<u32, &'static str> = {
++        let mut m = HashMap::new();
++        m.insert(0, "foo");
++        m.insert(1, "bar");
++        m.insert(2, "baz");
++        m
++    };
++    static ref COUNT: usize = HASHMAP.len();
++    static ref NUMBER: u32 = times_two(21);
++}
++
++fn times_two(n: u32) -> u32 { n * 2 }
++
++fn main() {
++    println!("The map has {} entries.", *COUNT);
++    println!("The entry for `0` is \"{}\".", HASHMAP.get(&0).unwrap());
++    println!("An expensive calculation on a static results in: {}.", *NUMBER);
++}
++```
++
++# Implementation details
++
++The `Deref` implementation uses a hidden static variable that is guarded by an atomic check on each access.
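To make that "hidden static guarded by an atomic check" concrete, here is a rough hand-written analogue of what the macro arranges for the `HASHMAP` example above (an editorial sketch using `std::sync::Once` directly, not the macro's literal expansion; the `hashmap` accessor is a hypothetical name):

```rust
use std::collections::HashMap;
use std::sync::{Once, ONCE_INIT};

// Hidden storage plus a Once guard, roughly what a `static ref` expands to.
static mut HASHMAP: Option<HashMap<u32, &'static str>> = None;
static INIT: Once = ONCE_INIT;

fn hashmap() -> &'static HashMap<u32, &'static str> {
    unsafe {
        // call_once performs the atomic check; the closure runs exactly once,
        // even if several threads race to be the first accessor.
        INIT.call_once(|| {
            let mut m = HashMap::new();
            m.insert(0, "foo");
            HASHMAP = Some(m);
        });
        HASHMAP.as_ref().unwrap()
    }
}

fn main() {
    assert_eq!(hashmap().get(&0), Some(&"foo"));
}
```

The macro wraps this pattern behind a `Deref` impl, which is why a mere `*HASHMAP` can trigger initialization.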
++ ++# Cargo features ++ ++This crate provides two cargo features: ++ ++- `nightly`: This uses unstable language features only available on the nightly release channel for a more optimal implementation. In practice this currently means avoiding a heap allocation per static. This feature might get deprecated at a later point once all relevant optimizations are usable from stable. ++- `spin_no_std` (implies `nightly`): This allows using this crate in a no-std environment, by depending on the standalone `spin` crate. ++ ++Both features depend on unstable language features, which means ++no guarantees can be made about them in regard to SemVer stability. ++ ++*/ ++ ++// NOTE: see build.rs for where these cfg values are set. ++#![cfg_attr(lazy_static_spin_impl, feature(const_fn))] ++ ++#![doc(html_root_url = "https://docs.rs/lazy_static/1.1.0")] ++#![no_std] ++ ++#[cfg(lazy_static_heap_impl)] ++#[path="heap_lazy.rs"] ++#[doc(hidden)] ++pub mod lazy; ++ ++#[cfg(lazy_static_inline_impl)] ++#[path="inline_lazy.rs"] ++#[doc(hidden)] ++pub mod lazy; ++ ++#[cfg(lazy_static_spin_impl)] ++#[path="core_lazy.rs"] ++#[doc(hidden)] ++pub mod lazy; ++ ++#[doc(hidden)] ++pub use core::ops::Deref as __Deref; ++ ++#[macro_export(local_inner_macros)] ++#[doc(hidden)] ++macro_rules! __lazy_static_internal { ++ // optional visibility restrictions are wrapped in `()` to allow for ++ // explicitly passing otherwise implicit information about private items ++ ($(#[$attr:meta])* ($($vis:tt)*) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { ++ __lazy_static_internal!(@MAKE TY, $(#[$attr])*, ($($vis)*), $N); ++ __lazy_static_internal!(@TAIL, $N : $T = $e); ++ lazy_static!($($t)*); ++ }; ++ (@TAIL, $N:ident : $T:ty = $e:expr) => { ++ impl $crate::__Deref for $N { ++ type Target = $T; ++ #[allow(unsafe_code)] ++ fn deref(&self) -> &$T { ++ unsafe { ++ #[inline(always)] ++ fn __static_ref_initialize() -> $T { $e } ++ ++ #[inline(always)] ++ unsafe fn __stability() -> &'static $T { ++ __lazy_static_create!(LAZY, $T); ++ LAZY.get(__static_ref_initialize) ++ } ++ __stability() ++ } ++ } ++ } ++ impl $crate::LazyStatic for $N { ++ fn initialize(lazy: &Self) { ++ let _ = &**lazy; ++ } ++ } ++ }; ++ // `vis` is wrapped in `()` to prevent parsing ambiguity ++ (@MAKE TY, $(#[$attr:meta])*, ($($vis:tt)*), $N:ident) => { ++ #[allow(missing_copy_implementations)] ++ #[allow(non_camel_case_types)] ++ #[allow(dead_code)] ++ $(#[$attr])* ++ $($vis)* struct $N {__private_field: ()} ++ #[doc(hidden)] ++ $($vis)* static $N: $N = $N {__private_field: ()}; ++ }; ++ () => () ++} ++ ++#[macro_export(local_inner_macros)] ++macro_rules! lazy_static { ++ ($(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { ++ // use `()` to explicitly forward the information about private items ++ __lazy_static_internal!($(#[$attr])* () static ref $N : $T = $e; $($t)*); ++ }; ++ ($(#[$attr:meta])* pub static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { ++ __lazy_static_internal!($(#[$attr])* (pub) static ref $N : $T = $e; $($t)*); ++ }; ++ ($(#[$attr:meta])* pub ($($vis:tt)+) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { ++ __lazy_static_internal!($(#[$attr])* (pub ($($vis)+)) static ref $N : $T = $e; $($t)*); ++ }; ++ () => () ++} ++ ++/// Support trait for enabling a few common operation on lazy static values. ++/// ++/// This is implemented by each defined lazy static, and ++/// used by the free functions in this crate. 
++pub trait LazyStatic {
++    #[doc(hidden)]
++    fn initialize(lazy: &Self);
++}
++
++/// Takes a shared reference to a lazy static and initializes
++/// it if it has not been already.
++///
++/// This can be used to control the initialization point of a lazy static.
++///
++/// Example:
++///
++/// ```rust
++/// #[macro_use]
++/// extern crate lazy_static;
++///
++/// lazy_static! {
++///     static ref BUFFER: Vec<u8> = (0..65537).collect();
++/// }
++///
++/// fn main() {
++///     lazy_static::initialize(&BUFFER);
++///
++///     // ...
++///     work_with_initialized_data(&BUFFER);
++/// }
++/// # fn work_with_initialized_data(_: &[u8]) {}
++/// ```
++pub fn initialize<T: LazyStatic>(lazy: &T) {
++    LazyStatic::initialize(lazy);
++}
diff --cc vendor/lazy_static-1.1.0/tests/no_std.rs
index 000000000,000000000..b460e7932
new file mode 100644
--- /dev/null
+++ b/vendor/lazy_static-1.1.0/tests/no_std.rs
@@@ -1,0 -1,0 +1,21 @@@
++#![cfg(feature="spin_no_std")]
++#![feature(const_fn)]
++
++#![no_std]
++
++#[macro_use]
++extern crate lazy_static;
++
++lazy_static! {
++    /// Documentation!
++    pub static ref NUMBER: u32 = times_two(3);
++}
++
++fn times_two(n: u32) -> u32 {
++    n * 2
++}
++
++#[test]
++fn test_basic() {
++    assert_eq!(*NUMBER, 6);
++}
diff --cc vendor/lazy_static-1.1.0/tests/test.rs
index 000000000,000000000..654abc541
new file mode 100644
--- /dev/null
+++ b/vendor/lazy_static-1.1.0/tests/test.rs
@@@ -1,0 -1,0 +1,162 @@@
++#[macro_use]
++extern crate lazy_static;
++use std::collections::HashMap;
++
++lazy_static! {
++    /// Documentation!
++    pub static ref NUMBER: u32 = times_two(3);
++
++    static ref ARRAY_BOXES: [Box<u32>; 3] = [Box::new(1), Box::new(2), Box::new(3)];
++
++    /// More documentation!
++    #[allow(unused_variables)]
++    #[derive(Copy, Clone, Debug)]
++    pub static ref STRING: String = "hello".to_string();
++
++    static ref HASHMAP: HashMap<u32, &'static str> = {
++        let mut m = HashMap::new();
++        m.insert(0, "abc");
++        m.insert(1, "def");
++        m.insert(2, "ghi");
++        m
++    };
++
++    // This should not compile if the unsafe is removed.
++    static ref UNSAFE: u32 = unsafe {
++        std::mem::transmute::<i32, u32>(-1)
++    };
++}
++
++lazy_static! {
++    static ref S1: &'static str = "a";
++    static ref S2: &'static str = "b";
++}
++lazy_static! {
++    static ref S3: String = [*S1, *S2].join("");
++}
++
++#[test]
++fn s3() {
++    assert_eq!(&*S3, "ab");
++}
++
++fn times_two(n: u32) -> u32 {
++    n * 2
++}
++
++#[test]
++fn test_basic() {
++    assert_eq!(&**STRING, "hello");
++    assert_eq!(*NUMBER, 6);
++    assert!(HASHMAP.get(&1).is_some());
++    assert!(HASHMAP.get(&3).is_none());
++    assert_eq!(&*ARRAY_BOXES, &[Box::new(1), Box::new(2), Box::new(3)]);
++    assert_eq!(*UNSAFE, std::u32::MAX);
++}
++
++#[test]
++fn test_repeat() {
++    assert_eq!(*NUMBER, 6);
++    assert_eq!(*NUMBER, 6);
++    assert_eq!(*NUMBER, 6);
++}
++
++#[test]
++fn test_meta() {
++    // this would not compile if STRING were not marked #[derive(Copy, Clone)]
++    let copy_of_string = STRING;
++    // just to make sure it was copied
++    assert!(&STRING as *const _ != &copy_of_string as *const _);
++
++    // this would not compile if STRING were not marked #[derive(Debug)]
++    assert_eq!(format!("{:?}", STRING), "STRING { __private_field: () }".to_string());
++}
++
++mod visibility {
++    lazy_static! {
++        pub static ref FOO: Box<u32> = Box::new(0);
++        static ref BAR: Box<u32> = Box::new(98);
++    }
++
++    pub mod inner {
++        lazy_static! {
++            pub(in visibility) static ref BAZ: Box<u32> = Box::new(42);
++            pub(crate) static ref BAG: Box<u32> = Box::new(37);
++        }
++    }
++
++    #[test]
++    fn sub_test() {
++        assert_eq!(**FOO, 0);
++        assert_eq!(**BAR, 98);
++        assert_eq!(**inner::BAZ, 42);
++        assert_eq!(**inner::BAG, 37);
++    }
++}
++
++#[test]
++fn test_visibility() {
++    assert_eq!(*visibility::FOO, Box::new(0));
++    assert_eq!(*visibility::inner::BAG, Box::new(37));
++}
++
++// This should not cause a warning about a missing Copy implementation
++lazy_static! {
++    pub static ref VAR: i32 = { 0 };
++}
++
++#[derive(Copy, Clone, Debug, PartialEq)]
++struct X;
++struct Once(X);
++const ONCE_INIT: Once = Once(X);
++static DATA: X = X;
++static ONCE: X = X;
++fn require_sync() -> X { X }
++fn transmute() -> X { X }
++fn __static_ref_initialize() -> X { X }
++fn test(_: Vec<X>) -> X { X }
++
++// All these names should not be shadowed
++lazy_static! {
++    static ref ITEM_NAME_TEST: X = {
++        test(vec![X, Once(X).0, ONCE_INIT.0, DATA, ONCE,
++            require_sync(), transmute(),
++            // Except this, which will sadly be shadowed by internals:
++            // __static_ref_initialize()
++        ])
++    };
++}
++
++#[test]
++fn item_name_shadowing() {
++    assert_eq!(*ITEM_NAME_TEST, X);
++}
++
++use std::sync::atomic::AtomicBool;
++use std::sync::atomic::ATOMIC_BOOL_INIT;
++use std::sync::atomic::Ordering::SeqCst;
++
++static PRE_INIT_FLAG: AtomicBool = ATOMIC_BOOL_INIT;
++
++lazy_static! {
++    static ref PRE_INIT: () = {
++        PRE_INIT_FLAG.store(true, SeqCst);
++        ()
++    };
++}
++
++#[test]
++fn pre_init() {
++    assert_eq!(PRE_INIT_FLAG.load(SeqCst), false);
++    lazy_static::initialize(&PRE_INIT);
++    assert_eq!(PRE_INIT_FLAG.load(SeqCst), true);
++}
++
++lazy_static! {
++    static ref LIFETIME_NAME: for<'a> fn(&'a u8) = { fn f(_: &u8) {} f };
++}
++
++#[test]
++fn lifetime_name() {
++    let _ = LIFETIME_NAME;
++}
diff --cc vendor/libc-0.2.43/.cargo-checksum.json
index 000000000,000000000..23b2d5b48
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/.cargo-checksum.json
@@@ -1,0 -1,0 +1,1 @@@
++{"files":{},"package":"76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d"}
diff --cc vendor/libc-0.2.43/.travis.yml
index 000000000,000000000..b6a73f00c
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/.travis.yml
@@@ -1,0 -1,0 +1,123 @@@
++language: rust
++rust: stable
++sudo: required
++dist: trusty
++services:
++  - docker
++install:
++  - if [ -z "$NO_ADD" ]; then rustup target add $TARGET; fi
++script:
++  - cargo build $OPT
++  - cargo build $OPT --no-default-features
++  - cargo generate-lockfile --manifest-path libc-test/Cargo.toml
++  - if [[ $TRAVIS_OS_NAME = "linux" ]]; then
++      sh ci/run-docker.sh $TARGET;
++    else
++      export CARGO_TARGET_DIR=`pwd`/target;
++      sh ci/run.sh $TARGET;
++    fi
++  - rustc ci/style.rs && ./style src
++env:
++  global:
++    secure: "e2/3QjgRN9atOuSHp22TrYG7QVKcYUWY48Hi9b60w+r1+BhPkTseIJLte7WefRhdXtqpjjUJTooKDhnurFOeHaCT+nmBgiv+FPU893sBl4bhesY4m0vgUJVbNZcs6lTImYekWVb+aqjGdgV/XAgCw7c3kPmrZV0MzGDWL64Xaps="
++matrix:
++  allow_failures:
++    # FIXME(#987) move back to include once 404 is fixed
++    - env: TARGET=s390x-unknown-linux-gnu
++  include:
++    # 1.13.0 compat
++    - env: TARGET=x86_64-unknown-linux-gnu NO_ADD=1
++      rust: 1.13.0
++      script: rm -f Cargo.lock && cargo build
++      install:
++
++    # build documentation
++    - env: TARGET=x86_64-unknown-linux-gnu NO_ADD=1
++      rust: nightly
++      script: sh ci/dox.sh
++
++    # stable compat
++    - env: TARGET=x86_64-unknown-linux-gnu NO_ADD=1
++    - env: TARGET=i686-unknown-linux-gnu
++    - os: osx
++      env:
TARGET=x86_64-apple-darwin NO_ADD=1 ++ osx_image: xcode9.4 ++ - os: osx ++ env: TARGET=i686-apple-darwin ++ osx_image: xcode9.4 ++ - env: TARGET=arm-linux-androideabi ++ - env: TARGET=aarch64-linux-android ++ # FIXME(#826) should reenable ++ #- env: TARGET=i686-linux-android ++ - env: TARGET=x86_64-linux-android ++ - env: TARGET=x86_64-unknown-linux-musl ++ - env: TARGET=i686-unknown-linux-musl ++ - env: TARGET=arm-unknown-linux-gnueabihf ++ - env: TARGET=arm-unknown-linux-musleabihf ++ - env: TARGET=aarch64-unknown-linux-gnu ++ - env: TARGET=aarch64-unknown-linux-musl ++ # FIXME(#856) ++ rust: 1.22.1 ++ - os: osx ++ osx_image: xcode9.4 ++ env: TARGET=i386-apple-ios ++ CARGO_TARGET_I386_APPLE_IOS_RUNNER=$HOME/runtest ++ RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 ++ before_install: ++ rustc ./ci/ios/deploy_and_run_on_ios_simulator.rs -o $HOME/runtest ++ - os: osx ++ osx_image: xcode9.4 ++ env: TARGET=x86_64-apple-ios ++ CARGO_TARGET_X86_64_APPLE_IOS_RUNNER=$HOME/runtest ++ RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 ++ before_install: ++ rustc ./ci/ios/deploy_and_run_on_ios_simulator.rs -o $HOME/runtest ++ - env: TARGET=x86_64-rumprun-netbsd ++ - env: TARGET=powerpc-unknown-linux-gnu ++ - env: TARGET=powerpc64-unknown-linux-gnu ++ - env: TARGET=powerpc64le-unknown-linux-gnu ++ - env: TARGET=mips-unknown-linux-musl ++ - env: TARGET=mipsel-unknown-linux-musl ++ - env: TARGET=mips64-unknown-linux-gnuabi64 ++ - env: TARGET=mips64el-unknown-linux-gnuabi64 ++ - env: TARGET=mips-unknown-linux-gnu ++ - env: TARGET=s390x-unknown-linux-gnu ++ - env: TARGET=sparc64-unknown-linux-gnu ++ - env: TARGET=asmjs-unknown-emscripten ++ - env: TARGET=wasm32-unknown-emscripten ++ ++ # beta ++ - env: TARGET=x86_64-unknown-linux-gnu NO_ADD=1 ++ rust: beta ++ - os: osx ++ env: TARGET=x86_64-apple-darwin NO_ADD=1 ++ osx_image: xcode9.4 ++ rust: beta ++ ++ # nightly ++ - env: TARGET=x86_64-unknown-linux-gnu NO_ADD=1 ++ rust: nightly ++ - os: osx ++ env: TARGET=x86_64-apple-darwin NO_ADD=1 ++ osx_image: xcode9.4 ++ rust: nightly ++ # not available on stable ++ # without --release the build fails ++ # see https://github.com/rust-lang/rust/issues/45417 ++ - env: TARGET=x86_64-unknown-linux-gnux32 OPT="--release" ++ rust: nightly ++ ++ # QEMU based targets that compile in an emulator ++ - env: TARGET=x86_64-unknown-freebsd ++ allow_failures: ++ - env: TARGET=i386-apple-ios ++ CARGO_TARGET_I386_APPLE_IOS_RUNNER=$HOME/runtest ++ RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 ++ - env: TARGET=x86_64-apple-ios ++ CARGO_TARGET_X86_64_APPLE_IOS_RUNNER=$HOME/runtest ++ RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 ++ ++notifications: ++ email: ++ on_success: never ++ webhooks: https://buildbot.rust-lang.org/homu/travis diff --cc vendor/libc-0.2.43/Cargo.toml index 000000000,000000000..876b127c0 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/Cargo.toml @@@ -1,0 -1,0 +1,33 @@@ ++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO ++# ++# When uploading crates to the registry Cargo will automatically ++# "normalize" Cargo.toml files for maximal compatibility ++# with all versions of Cargo and also rewrite `path` dependencies ++# to registry (e.g. crates.io) dependencies ++# ++# If you believe there's an error in this file please file an ++# issue against the rust-lang/cargo repository. 
If you're ++# editing this file be aware that the upstream Cargo.toml ++# will likely look very different (and much more reasonable) ++ ++[package] ++name = "libc" ++version = "0.2.43" ++authors = ["The Rust Project Developers"] ++description = "A library for types and bindings to native C functions often found in libc or\nother common platform libraries.\n" ++homepage = "https://github.com/rust-lang/libc" ++documentation = "http://doc.rust-lang.org/libc" ++readme = "README.md" ++license = "MIT/Apache-2.0" ++repository = "https://github.com/rust-lang/libc" ++ ++[features] ++align = [] ++default = ["use_std"] ++use_std = [] ++[badges.appveyor] ++project_name = "rust-lang-libs/libc" ++repository = "rust-lang/libc" ++ ++[badges.travis-ci] ++repository = "rust-lang/libc" diff --cc vendor/libc-0.2.43/LICENSE-APACHE index 000000000,000000000..16fe87b06 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/LICENSE-APACHE @@@ -1,0 -1,0 +1,201 @@@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. ++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. 
For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++4. Redistribution. You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. 
The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++8. Limitation of Liability. In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. 
++ ++END OF TERMS AND CONDITIONS ++ ++APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "[]" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++Copyright [yyyy] [name of copyright owner] ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. diff --cc vendor/libc-0.2.43/LICENSE-MIT index 000000000,000000000..39d4bdb5a new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/LICENSE-MIT @@@ -1,0 -1,0 +1,25 @@@ ++Copyright (c) 2014 The Rust Project Developers ++ ++Permission is hereby granted, free of charge, to any ++person obtaining a copy of this software and associated ++documentation files (the "Software"), to deal in the ++Software without restriction, including without ++limitation the rights to use, copy, modify, merge, ++publish, distribute, sublicense, and/or sell copies of ++the Software, and to permit persons to whom the Software ++is furnished to do so, subject to the following ++conditions: ++ ++The above copyright notice and this permission notice ++shall be included in all copies or substantial portions ++of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED ++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A ++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT ++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR ++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++DEALINGS IN THE SOFTWARE. diff --cc vendor/libc-0.2.43/README.md index 000000000,000000000..a19a56ee0 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/README.md @@@ -1,0 -1,0 +1,174 @@@ ++libc ++==== ++ ++A Rust library with native bindings to the types and functions commonly found on ++various systems, including libc. 
++ ++[![Build Status](https://travis-ci.org/rust-lang/libc.svg?branch=master)](https://travis-ci.org/rust-lang/libc) ++[![Build status](https://ci.appveyor.com/api/projects/status/github/rust-lang/libc?svg=true)](https://ci.appveyor.com/project/rust-lang-libs/libc) ++[![Latest version](https://img.shields.io/crates/v/libc.svg)](https://crates.io/crates/libc) ++[![Documentation](https://docs.rs/libc/badge.svg)](https://docs.rs/libc) ++![License](https://img.shields.io/crates/l/libc.svg) ++ ++ ++## Usage ++ ++First, add the following to your `Cargo.toml`: ++ ++```toml ++[dependencies] ++libc = "0.2" ++``` ++ ++Next, add this to your crate root: ++ ++```rust ++extern crate libc; ++``` ++ ++Currently libc by default links to the standard library, but if you would ++instead like to use libc in a `#![no_std]` situation or crate you can request ++this via: ++ ++```toml ++[dependencies] ++libc = { version = "0.2", default-features = false } ++``` ++ ++By default libc uses private fields in structs in order to enforce a certain ++memory alignment on them. These structs can be hard to instantiate outside of ++libc. To make libc use `#[repr(align(x))]`, instead of the private fields, ++activate the *align* feature. This requires Rust 1.25 or newer: ++ ++```toml ++[dependencies] ++libc = { version = "0.2", features = ["align"] } ++``` ++ ++## What is libc? ++ ++The primary purpose of this crate is to provide all of the definitions necessary ++to easily interoperate with C code (or "C-like" code) on each of the platforms ++that Rust supports. This includes type definitions (e.g. `c_int`), constants ++(e.g. `EINVAL`) as well as function headers (e.g. `malloc`). ++ ++This crate does not strive to have any form of compatibility across platforms, ++but rather it is simply a straight binding to the system libraries on the ++platform in question. ++ ++## Public API ++ ++This crate exports all underlying platform types, functions, and constants under ++the crate root, so all items are accessible as `libc::foo`. The types and values ++of all the exported APIs match the platform that libc is compiled for. ++ ++More detailed information about the design of this library can be found in its ++[associated RFC][rfc]. ++ ++[rfc]: https://github.com/rust-lang/rfcs/blob/master/text/1291-promote-libc.md ++ ++## Adding an API ++ ++Want to use an API which currently isn't bound in `libc`? It's quite easy to add ++one! ++ ++The internal structure of this crate is designed to minimize the number of ++`#[cfg]` attributes in order to easily be able to add new items which apply ++to all platforms in the future. As a result, the crate is organized ++hierarchically based on platform. Each module has a number of `#[cfg]`'d ++children, but only one is ever actually compiled. Each module then reexports all ++the contents of its children. ++ ++This means that for each platform that libc supports, the path from a ++leaf module to the root will contain all bindings for the platform in question. ++Consequently, this indicates where an API should be added! Adding an API at a ++particular level in the hierarchy means that it is supported on all the child ++platforms of that level. For example, when adding a Unix API it should be added ++to `src/unix/mod.rs`, but when adding a Linux-only API it should be added to ++`src/unix/notbsd/linux/mod.rs`. ++ ++If you're not 100% sure at what level of the hierarchy an API should be added ++at, fear not! 
This crate has CI support which tests any binding against all ++platforms supported, so you'll see failures if an API is added at the wrong ++level or has different signatures across platforms. ++ ++With that in mind, the steps for adding a new API are: ++ ++1. Determine where in the module hierarchy your API should be added. ++2. Add the API. ++3. Send a PR to this repo. ++4. Wait for CI to pass, fixing errors. ++5. Wait for a merge! ++ ++### Test before you commit ++ ++We have two automated tests running on [Travis](https://travis-ci.org/rust-lang/libc): ++ ++1. [`libc-test`](https://github.com/alexcrichton/ctest) ++ - `cd libc-test && cargo test` ++ - Use the `skip_*()` functions in `build.rs` if you really need a workaround. ++2. Style checker ++ - `rustc ci/style.rs && ./style src` ++ ++### Releasing your change to crates.io ++ ++Now that you've done the amazing job of landing your new API or your new ++platform in this crate, the next step is to get that sweet, sweet usage from ++crates.io! The only next step is to bump the version of libc and then publish ++it. If you'd like to get a release out ASAP you can follow these steps: ++ ++1. Update the version number in `Cargo.toml`, you'll just be bumping the patch ++ version number. ++2. Run `cargo update` to regenerate the lockfile to encode your version bump in ++ the lock file. You may pull in some other updated dependencies, that's ok. ++3. Send a PR to this repository. It should [look like this][example], but it'd ++ also be nice to fill out the description with a small rationale for the ++ release (any rationale is ok though!) ++4. Once merged the release will be tagged and published by one of the libc crate ++ maintainers. ++ ++[example]: https://github.com/rust-lang/libc/pull/583 ++ ++## Platforms and Documentation ++ ++The following platforms are currently tested and have documentation available: ++ ++Tested: ++ * [`i686-pc-windows-msvc`](https://rust-lang.github.io/libc/i686-pc-windows-msvc/libc/) ++ * [`x86_64-pc-windows-msvc`](https://rust-lang.github.io/libc/x86_64-pc-windows-msvc/libc/) ++ (Windows) ++ * [`i686-pc-windows-gnu`](https://rust-lang.github.io/libc/i686-pc-windows-gnu/libc/) ++ * [`x86_64-pc-windows-gnu`](https://rust-lang.github.io/libc/x86_64-pc-windows-gnu/libc/) ++ * [`i686-apple-darwin`](https://rust-lang.github.io/libc/i686-apple-darwin/libc/) ++ * [`x86_64-apple-darwin`](https://rust-lang.github.io/libc/x86_64-apple-darwin/libc/) ++ (OSX) ++ * `i386-apple-ios` ++ * `x86_64-apple-ios` ++ * [`i686-unknown-linux-gnu`](https://rust-lang.github.io/libc/i686-unknown-linux-gnu/libc/) ++ * [`x86_64-unknown-linux-gnu`](https://rust-lang.github.io/libc/x86_64-unknown-linux-gnu/libc/) ++ (Linux) ++ * [`x86_64-unknown-linux-musl`](https://rust-lang.github.io/libc/x86_64-unknown-linux-musl/libc/) ++ (Linux MUSL) ++ * [`aarch64-unknown-linux-gnu`](https://rust-lang.github.io/libc/aarch64-unknown-linux-gnu/libc/) ++ (Linux) ++ * `aarch64-unknown-linux-musl` ++ (Linux MUSL) ++ * [`sparc64-unknown-linux-gnu`](https://rust-lang.github.io/libc/sparc64-unknown-linux-gnu/libc/) ++ (Linux) ++ * [`mips-unknown-linux-gnu`](https://rust-lang.github.io/libc/mips-unknown-linux-gnu/libc/) ++ * [`arm-unknown-linux-gnueabihf`](https://rust-lang.github.io/libc/arm-unknown-linux-gnueabihf/libc/) ++ * [`arm-linux-androideabi`](https://rust-lang.github.io/libc/arm-linux-androideabi/libc/) ++ (Android) ++ * [`x86_64-unknown-freebsd`](https://rust-lang.github.io/libc/x86_64-unknown-freebsd/libc/) ++ * 
++
++### Releasing your change to crates.io
++
++Now that you've done the amazing job of landing your new API or your new
++platform in this crate, the next step is to get that sweet, sweet usage from
++crates.io! All that remains is to bump the version of libc and then publish
++it. If you'd like to get a release out ASAP you can follow these steps:
++
++1. Update the version number in `Cargo.toml`; you'll usually just be bumping
++   the patch version number.
++2. Run `cargo update` to regenerate the lockfile and encode your version bump
++   in it. You may pull in some other updated dependencies; that's ok.
++3. Send a PR to this repository. It should [look like this][example], but it'd
++   also be nice to fill out the description with a small rationale for the
++   release (any rationale is ok though!)
++4. Once merged, the release will be tagged and published by one of the libc
++   crate maintainers.
++
++[example]: https://github.com/rust-lang/libc/pull/583
++
++## Platforms and Documentation
++
++The following platforms are currently tested and have documentation available:
++
++Tested:
++ * [`i686-pc-windows-msvc`](https://rust-lang.github.io/libc/i686-pc-windows-msvc/libc/)
++ * [`x86_64-pc-windows-msvc`](https://rust-lang.github.io/libc/x86_64-pc-windows-msvc/libc/)
++   (Windows)
++ * [`i686-pc-windows-gnu`](https://rust-lang.github.io/libc/i686-pc-windows-gnu/libc/)
++ * [`x86_64-pc-windows-gnu`](https://rust-lang.github.io/libc/x86_64-pc-windows-gnu/libc/)
++ * [`i686-apple-darwin`](https://rust-lang.github.io/libc/i686-apple-darwin/libc/)
++ * [`x86_64-apple-darwin`](https://rust-lang.github.io/libc/x86_64-apple-darwin/libc/)
++   (OSX)
++ * `i386-apple-ios`
++ * `x86_64-apple-ios`
++ * [`i686-unknown-linux-gnu`](https://rust-lang.github.io/libc/i686-unknown-linux-gnu/libc/)
++ * [`x86_64-unknown-linux-gnu`](https://rust-lang.github.io/libc/x86_64-unknown-linux-gnu/libc/)
++   (Linux)
++ * [`x86_64-unknown-linux-musl`](https://rust-lang.github.io/libc/x86_64-unknown-linux-musl/libc/)
++   (Linux MUSL)
++ * [`aarch64-unknown-linux-gnu`](https://rust-lang.github.io/libc/aarch64-unknown-linux-gnu/libc/)
++   (Linux)
++ * `aarch64-unknown-linux-musl`
++   (Linux MUSL)
++ * [`sparc64-unknown-linux-gnu`](https://rust-lang.github.io/libc/sparc64-unknown-linux-gnu/libc/)
++   (Linux)
++ * [`mips-unknown-linux-gnu`](https://rust-lang.github.io/libc/mips-unknown-linux-gnu/libc/)
++ * [`arm-unknown-linux-gnueabihf`](https://rust-lang.github.io/libc/arm-unknown-linux-gnueabihf/libc/)
++ * [`arm-linux-androideabi`](https://rust-lang.github.io/libc/arm-linux-androideabi/libc/)
++   (Android)
++ * [`x86_64-unknown-freebsd`](https://rust-lang.github.io/libc/x86_64-unknown-freebsd/libc/)
++ * [`x86_64-unknown-openbsd`](https://rust-lang.github.io/libc/x86_64-unknown-openbsd/libc/)
++ * [`x86_64-rumprun-netbsd`](https://rust-lang.github.io/libc/x86_64-rumprun-netbsd/libc/)
++
++The following may be supported, but are not guaranteed to always work:
++
++ * `i686-unknown-freebsd`
++ * [`x86_64-unknown-bitrig`](https://rust-lang.github.io/libc/x86_64-unknown-bitrig/libc/)
++ * [`x86_64-unknown-dragonfly`](https://rust-lang.github.io/libc/x86_64-unknown-dragonfly/libc/)
++ * `i686-unknown-haiku`
++ * `x86_64-unknown-haiku`
++ * [`x86_64-unknown-netbsd`](https://rust-lang.github.io/libc/x86_64-unknown-netbsd/libc/)
++ * [`x86_64-sun-solaris`](https://rust-lang.github.io/libc/x86_64-sun-solaris/libc/)
diff --cc vendor/libc-0.2.43/appveyor.yml
index 000000000,000000000..fe2a332a1
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/appveyor.yml
@@@ -1,0 -1,0 +1,28 @@@
++environment:
++  # When this was added there were revocation check failures when using the
++  # libcurl backend, as libcurl checks for revocation by default but rustup
++  # doesn't provide a switch to turn this off. Switch to Hyper, which, unlike
++  # libcurl, does not appear to check for revocation by default.
++  RUSTUP_USE_HYPER: 1
++  CARGO_HTTP_CHECK_REVOKE: false
++  matrix:
++  - TARGET: x86_64-pc-windows-gnu
++    MSYS2_BITS: 64
++  - TARGET: i686-pc-windows-gnu
++    MSYS2_BITS: 32
++  - TARGET: x86_64-pc-windows-msvc
++  - TARGET: i686-pc-windows-msvc
++install:
++  - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
++  - rustup-init.exe -y --default-host %TARGET%
++  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
++  - if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin
++  - rustc -V
++  - cargo -V
++
++build: false
++
++test_script:
++  - cargo test --target %TARGET%
++  - cargo test --no-default-features --target %TARGET%
++  - cargo test --manifest-path libc-test/Cargo.toml --target %TARGET%
diff --cc vendor/libc-0.2.43/ci/README.md
index 000000000,000000000..28152e5d0
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/README.md
@@@ -1,0 -1,0 +1,243 @@@
++The goal of the libc crate is to have CI running everywhere so that we have the
++strongest guarantees about the definitions that this library contains, and as a
++result the CI is pretty complicated and also pretty large! Hopefully this can
++serve as a guide through the sea of scripts in this directory and elsewhere in
++this project.
++
++# Files
++
++First up, let's talk about the files in this directory:
++
++* `run-travis.sh` - a shell script run by all Travis builders; it is
++  responsible for setting up the rest of the environment, such as installing
++  new packages, downloading Rust target libraries, etc.
++
++* `run.sh` - the actual script which runs tests for a particular architecture.
++  Called from the `run-travis.sh` script, this will run all tests for the
++  target specified.
++
++* `cargo-config` - Cargo configuration of linkers to use, copied into place by
++  the `run-travis.sh` script before builds are run.
++
++* `dox.sh` - script called from `run-travis.sh` only on the Linux 64-bit
++  nightly Travis bots to build documentation for this crate.
++
++* `landing-page-*.html` - used by `dox.sh` to generate a landing page for all
++  architectures' documentation.
++
++* `run-qemu.sh` - see the discussion about QEMU below
++
++* `mips`, `rumprun` - instructions to build the Docker image for each
++  respective CI target
++
++# CI Systems
++
++Currently this repository leverages a combination of Travis CI and AppVeyor for
++running tests. The triples tested are:
++
++* AppVeyor
++  * `{i686,x86_64}-pc-windows-{msvc,gnu}`
++* Travis
++  * `{i686,x86_64,mips,aarch64}-unknown-linux-gnu`
++  * `{x86_64,aarch64}-unknown-linux-musl`
++  * `arm-unknown-linux-gnueabihf`
++  * `arm-linux-androideabi`
++  * `{i686,x86_64}-apple-{darwin,ios}`
++  * `x86_64-rumprun-netbsd`
++  * `x86_64-unknown-freebsd`
++  * `x86_64-unknown-openbsd`
++
++The Windows triples are all pretty standard: they just set up their environment
++and run tests; there's no need to download any extra target libs (we just
++download the right installer). The Intel Linux/OSX builds are similar in that
++we just download the right target libs and run tests. Note that the Intel
++Linux/OSX builds are the only ones run on stable/beta/nightly.
++
++The remaining architectures look like:
++
++* Android runs in a [docker image][android-docker] with an emulator, the NDK,
++  and the SDK already set up. The entire build happens within the docker image.
++* The MIPS, ARM, and AArch64 builds all use the QEMU userspace emulator to run
++  the generated binary and actually verify the tests pass.
++* The MUSL build just has to download a MUSL compiler and target libraries and
++  then otherwise runs tests normally.
++* iOS builds currently need an extra linker flag, but beyond that they're built
++  as standard as everything else.
++* The rumprun target builds an entire kernel from the test suite and then runs
++  it inside QEMU, using the serial console to test whether it succeeded or
++  failed.
++* The BSD builds, currently OpenBSD and FreeBSD, use QEMU to boot up a system
++  and compile/run tests. More information on that below.
++
++[android-docker]: https://github.com/rust-lang/rust-buildbot/blob/master/slaves/android/Dockerfile
++
++## QEMU
++
++Lots of the architectures tested here use QEMU in the tests, so it's worth going
++over all the crazy capabilities QEMU has and the various flavors in which we use
++it!
++
++First up, QEMU has userspace emulation, where it doesn't boot a full kernel; it
++just runs a binary from another architecture (using the `qemu-<arch>` wrappers).
++We do, however, provide it the runtime path for the dynamically loaded system
++libraries. This strategy is used for all Linux architectures that aren't Intel;
++the Docker images below wire it up through `CARGO_TARGET_<TRIPLE>_RUNNER`
++variables such as `qemu-aarch64 -L /usr/aarch64-linux-gnu`. Note that one
++downside of this QEMU mode is that threads are barely implemented, so we're
++careful not to spawn many threads.
++
++For the rumprun target the only output is a kernel image, so we just use that
++plus the `rumpbake` command to create a full kernel image, which is then run
++from within QEMU.
++
++Finally, the fun part: the BSDs. Quite a few hoops are jumped through to get CI
++working for these platforms, but the gist of it looks like:
++
++* Cross compiling from Linux to any of the BSDs seems to be quite non-standard.
++  We may be able to get it working, but it might be difficult at that point to
++  ensure that the libc definitions align with what you'd get on the BSD itself.
++  As a result, we try to do compiles within the BSD distro.
++* On Travis we can't run a VM-in-a-VM, so we resort to userspace emulation
++  (QEMU).
++* Unfortunately on Travis we also can't use KVM, so the emulation is super slow.
++
++With all that in mind, the way BSD is tested looks like:
++
++1. Download a pre-prepared image for the OS being tested.
++2. Generate the tests for the OS being tested. This involves running the `ctest`
++   library over libc to generate a Rust file and a C file which will then be
++   compiled into the final test.
++3. Generate a disk image which will later be mounted by the OS being tested.
++   This image is mostly just the libc directory, but some modifications are made
++   to compile the generated files from step 2.
++4. The kernel is booted in QEMU, and it is configured to detect the libc-test
++   image being available, run the test script, and then shut down afterwards.
++5. Look for whether the tests passed in the serial console output of the kernel.
++
++There are some pretty specific instructions for setting up each image (detailed
++below), but the main gist is that we must avoid a vanilla `cargo run` inside of
++the `libc-test` directory (which is what it's intended for) because that would
++compile `syntex_syntax`, a large library, under userspace emulation. This
++invariably times out on Travis, so we can't do that.
++
++Once all those hoops are jumped through, however, we can be happy that we're
++testing almost everything!
++
++Below are some details of how to set up the initial OS images which are
++downloaded. Each image must be configured to have input/output over the serial
++console, log in automatically at the serial console, detect if a second drive
++in QEMU is available, and if so mount it, run a script (it'll specifically be
++`run-qemu.sh` in this folder, which is copied into the generated image talked
++about above), and then shut down.
++
++### QEMU Setup - FreeBSD
++
++1. [Download the latest stable amd64-bootonly release ISO](https://www.freebsd.org/where.html).
++   E.g. FreeBSD-11.1-RELEASE-amd64-bootonly.iso
++2. Create the disk image: `qemu-img create -f qcow2 FreeBSD-11.1-RELEASE-amd64.qcow2 2G`
++3. Boot the machine: `qemu-system-x86_64 -cdrom FreeBSD-11.1-RELEASE-amd64-bootonly.iso -drive if=virtio,file=FreeBSD-11.1-RELEASE-amd64.qcow2 -net nic,model=virtio -net user`
++4. Run the installer, and install FreeBSD:
++   1. Install
++   1. Continue with default keymap
++   1. Set Hostname: freebsd-ci
++   1. Distribution Select:
++      1. Uncheck lib32
++      1. Uncheck ports
++   1. Network Configuration: vtnet0
++   1. Configure IPv4? Yes
++   1. DHCP? Yes
++   1. Configure IPv6? No
++   1. Resolver Configuration: Ok
++   1. Mirror Selection: Main Site
++   1. Partitioning: Auto (UFS)
++   1. Partition: Entire Disk
++   1. Partition Scheme: MBR
++   1. App Partition: Ok
++   1. Partition Editor: Finish
++   1. Confirmation: Commit
++   1. Wait for sets to install
++   1. Set the root password to nothing (press enter twice)
++   1. Set time zone to UTC
++   1. Set Date: Skip
++   1. Set Time: Skip
++   1. System Configuration:
++      1. Disable sshd
++      1. Disable dumpdev
++   1. System Hardening
++      1. Disable Sendmail service
++   1. Add User Accounts: No
++   1. Final Configuration: Exit
++   1. Manual Configuration: Yes
++   1. `echo 'console="comconsole"' >> /boot/loader.conf`
++   1. `echo 'autoboot_delay="0"' >> /boot/loader.conf`
++   1. `echo 'ext2fs_load="YES"' >> /boot/loader.conf`
++   1. Look at `/etc/ttys`, see what getty argument is for `ttyu0` (E.g. `3wire`)
++   1. Edit `/etc/gettytab` (with `vi` for example), look for the `ttyu0`
++      argument, and prepend `:al=root` to the line beneath to have the machine
++      auto-login as root. E.g.
++
++          3wire:\
++                  :np:nc:sp#0:
++
++      becomes:
++
++          3wire:\
++                  :al=root:np:nc:sp#0:
++
++   1. Edit `/root/.login` and put this in it:
++
++          [ -e /dev/vtbd1 ] || exit 0
++          mount -t ext2fs /dev/vtbd1 /mnt
++          sh /mnt/run.sh /mnt
++          poweroff
++
++   1. Exit the post-install shell: `exit`
++   1. Back in the installer, choose Reboot
++   1. If all went well the machine should reboot and show a login prompt.
++      If you switch to the serial console by choosing View > serial0 in
++      the qemu menu, you should be logged in as root.
++   1. Shut down the machine: `shutdown -p now`
++
++Helpful links:
++
++* https://en.wikibooks.org/wiki/QEMU/Images
++* https://blog.nekoconeko.nl/blog/2015/06/04/creating-an-openstack-freebsd-image.html
++* https://www.freebsd.org/doc/handbook/serialconsole-setup.html
++
++### QEMU setup - OpenBSD
++
++1. Download the CD installer
++2. `qemu-img create -f qcow2 foo.qcow2 2G`
++3. `qemu -cdrom foo.iso -drive if=virtio,file=foo.qcow2 -net nic,model=virtio -net user`
++4. Run the installer
++5. `echo 'set tty com0' >> /etc/boot.conf`
++6. `echo 'boot' >> /etc/boot.conf`
++7. Modify /etc/ttys, changing the `tty00` at the end from 'unknown off' to
++   'vt220 on secure'
++8. Modify the same line in /etc/ttys to have `"/root/foo.sh"` as the shell
++9. Add this script to `/root/foo.sh`
++
++```
++#!/bin/sh
++exec 1>/dev/tty00
++exec 2>&1
++
++if mount -t ext2fs /dev/sd1c /mnt; then
++  sh /mnt/run.sh /mnt
++  shutdown -ph now
++fi
++
++# limited shell...
++exec /bin/sh < /dev/tty00
++```
++
++10. `chmod +x /root/foo.sh`
++
++Helpful links:
++
++* https://en.wikibooks.org/wiki/QEMU/Images
++* http://www.openbsd.org/faq/faq7.html#SerCon
++
++# Questions?
++
++Hopefully that's at least somewhat of an introduction to everything going on
++here; feel free to ping @alexcrichton with questions!
diff --cc vendor/libc-0.2.43/ci/android-install-ndk.sh
index 000000000,000000000..873f6c52c
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/android-install-ndk.sh
@@@ -1,0 -1,0 +1,37 @@@
++#!/bin/sh
++# Copyright 2016 The Rust Project Developers. See the COPYRIGHT
++# file at the top-level directory of this distribution and at
++# http://rust-lang.org/COPYRIGHT.
++#
++# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++# option. This file may not be copied, modified, or distributed
++# except according to those terms.
++
++set -ex
++
++curl -O https://dl.google.com/android/repository/android-ndk-r15b-linux-x86_64.zip
++unzip -q android-ndk-r15b-linux-x86_64.zip
++
++case "$1" in
++  aarch64)
++    arch=arm64
++    ;;
++
++  i686)
++    arch=x86
++    ;;
++
++  *)
++    arch=$1
++    ;;
++esac;
++
++android-ndk-r15b/build/tools/make_standalone_toolchain.py \
++    --unified-headers \
++    --install-dir /android/ndk-$1 \
++    --arch $arch \
++    --api 24
++
++rm -rf ./android-ndk-r15b-linux-x86_64.zip ./android-ndk-r15b
diff --cc vendor/libc-0.2.43/ci/android-install-sdk.sh
index 000000000,000000000..ab7e14d95
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/android-install-sdk.sh
@@@ -1,0 -1,0 +1,60 @@@
++#!/bin/sh
++# Copyright 2016 The Rust Project Developers. See the COPYRIGHT
++# file at the top-level directory of this distribution and at
++# http://rust-lang.org/COPYRIGHT.
++#
++# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++# option. This file may not be copied, modified, or distributed
++# except according to those terms.
++ ++set -ex ++ ++# Prep the SDK and emulator ++# ++# Note that the update process requires that we accept a bunch of licenses, and ++# we can't just pipe `yes` into it for some reason, so we take the same strategy ++# located in https://github.com/appunite/docker by just wrapping it in a script ++# which apparently magically accepts the licenses. ++ ++mkdir sdk ++curl https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip -O ++unzip -d sdk sdk-tools-linux-3859397.zip ++ ++case "$1" in ++ arm | armv7) ++ abi=armeabi-v7a ++ ;; ++ ++ aarch64) ++ abi=arm64-v8a ++ ;; ++ ++ i686) ++ abi=x86 ++ ;; ++ ++ x86_64) ++ abi=x86_64 ++ ;; ++ ++ *) ++ echo "invalid arch: $1" ++ exit 1 ++ ;; ++esac; ++ ++# --no_https avoids ++# javax.net.ssl.SSLHandshakeException: sun.security.validator.ValidatorException: No trusted certificate found ++echo "yes" | \ ++ ./sdk/tools/bin/sdkmanager --no_https \ ++ "emulator" \ ++ "platform-tools" \ ++ "platforms;android-24" \ ++ "system-images;android-24;default;$abi" ++ ++echo "no" | ++ ./sdk/tools/bin/avdmanager create avd \ ++ --name $1 \ ++ --package "system-images;android-24;default;$abi" diff --cc vendor/libc-0.2.43/ci/android-sysimage.sh index 000000000,000000000..9611dfeb0 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/android-sysimage.sh @@@ -1,0 -1,0 +1,52 @@@ ++# Copyright 2017 The Rust Project Developers. See the COPYRIGHT ++# file at the top-level directory of this distribution and at ++# http://rust-lang.org/COPYRIGHT. ++# ++# Licensed under the Apache License, Version 2.0 or the MIT license ++# , at your ++# option. This file may not be copied, modified, or distributed ++# except according to those terms. ++ ++set -ex ++ ++URL=https://dl.google.com/android/repository/sys-img/android ++ ++main() { ++ local arch=$1 ++ local name=$2 ++ local dest=/system ++ local td=$(mktemp -d) ++ ++ apt-get install --no-install-recommends e2tools ++ ++ pushd $td ++ curl -O $URL/$name ++ unzip -q $name ++ ++ local system=$(find . 
-name system.img) ++ mkdir -p $dest/{bin,lib,lib64} ++ ++ # Extract android linker and libraries to /system ++ # This allows android executables to be run directly (or with qemu) ++ if [ $arch = "x86_64" -o $arch = "arm64" ]; then ++ e2cp -p $system:/bin/linker64 $dest/bin/ ++ e2cp -p $system:/lib64/libdl.so $dest/lib64/ ++ e2cp -p $system:/lib64/libc.so $dest/lib64/ ++ e2cp -p $system:/lib64/libm.so $dest/lib64/ ++ else ++ e2cp -p $system:/bin/linker $dest/bin/ ++ e2cp -p $system:/lib/libdl.so $dest/lib/ ++ e2cp -p $system:/lib/libc.so $dest/lib/ ++ e2cp -p $system:/lib/libm.so $dest/lib/ ++ fi ++ ++ # clean up ++ apt-get purge --auto-remove -y e2tools ++ ++ popd ++ ++ rm -rf $td ++} ++ ++main "${@}" diff --cc vendor/libc-0.2.43/ci/docker/aarch64-linux-android/Dockerfile index 000000000,000000000..5fc83aadb new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/aarch64-linux-android/Dockerfile @@@ -1,0 -1,0 +1,45 @@@ ++FROM ubuntu:16.04 ++ ++RUN dpkg --add-architecture i386 && \ ++ apt-get update && \ ++ apt-get install -y --no-install-recommends \ ++ file \ ++ curl \ ++ ca-certificates \ ++ python \ ++ unzip \ ++ expect \ ++ openjdk-9-jre \ ++ libstdc++6:i386 \ ++ libpulse0 \ ++ gcc \ ++ libc6-dev ++ ++WORKDIR /android/ ++COPY android* /android/ ++ ++ENV ANDROID_ARCH=aarch64 ++ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools ++ ++RUN sh /android/android-install-ndk.sh $ANDROID_ARCH ++RUN sh /android/android-install-sdk.sh $ANDROID_ARCH ++RUN mv /root/.android /tmp ++RUN chmod 777 -R /tmp/.android ++RUN chmod 755 /android/sdk/tools/* /android/sdk/emulator/qemu/linux-x86_64/* ++ ++ENV PATH=$PATH:/rust/bin \ ++ CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER=aarch64-linux-android-gcc \ ++ CARGO_TARGET_AARCH64_LINUX_ANDROID_RUNNER=/tmp/runtest \ ++ HOME=/tmp ++ ++ADD runtest-android.rs /tmp/runtest.rs ++ENTRYPOINT [ \ ++ "bash", \ ++ "-c", \ ++ # set SHELL so android can detect a 64bits system, see ++ # http://stackoverflow.com/a/41789144 ++ "SHELL=/bin/dash /android/sdk/emulator/emulator @aarch64 -no-window & \ ++ rustc /tmp/runtest.rs -o /tmp/runtest && \ ++ exec \"$@\"", \ ++ "--" \ ++] diff --cc vendor/libc-0.2.43/ci/docker/aarch64-unknown-linux-gnu/Dockerfile index 000000000,000000000..18214a3e6 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/aarch64-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,7 @@@ ++FROM ubuntu:17.10 ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev ca-certificates \ ++ gcc-aarch64-linux-gnu libc6-dev-arm64-cross qemu-user ++ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ ++ CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-aarch64 -L /usr/aarch64-linux-gnu" \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/aarch64-unknown-linux-musl/Dockerfile index 000000000,000000000..caec1572c new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/aarch64-unknown-linux-musl/Dockerfile @@@ -1,0 -1,0 +1,27 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc make libc6-dev git curl ca-certificates \ ++ gcc-aarch64-linux-gnu qemu-user ++RUN curl https://www.musl-libc.org/releases/musl-1.1.19.tar.gz | \ ++ tar xzf - && \ ++ cd musl-1.1.19 && \ ++ CC=aarch64-linux-gnu-gcc \ ++ ./configure --prefix=/musl-aarch64 --enable-wrapper=yes && \ ++ make install -j4 && \ ++ cd .. 
&& \ ++ rm -rf musl-1.1.19 ++# Install linux kernel headers sanitized for use with musl ++RUN curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-6.tar.gz | \ ++ tar xzf - && \ ++ cd kernel-headers-3.12.6-6 && \ ++ make ARCH=arm64 prefix=/musl-aarch64 install -j4 && \ ++ cd .. && \ ++ rm -rf kernel-headers-3.12.6-6 ++ ++# FIXME: shouldn't need the `-lgcc` here, shouldn't that be in libstd? ++ENV PATH=$PATH:/musl-aarch64/bin:/rust/bin \ ++ CC_aarch64_unknown_linux_musl=musl-gcc \ ++ RUSTFLAGS='-Clink-args=-lgcc' \ ++ CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER=musl-gcc \ ++ CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUNNER="qemu-aarch64 -L /musl-aarch64" diff --cc vendor/libc-0.2.43/ci/docker/arm-linux-androideabi/Dockerfile index 000000000,000000000..a3fc64bfd new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/arm-linux-androideabi/Dockerfile @@@ -1,0 -1,0 +1,45 @@@ ++FROM ubuntu:16.04 ++ ++RUN dpkg --add-architecture i386 && \ ++ apt-get update && \ ++ apt-get install -y --no-install-recommends \ ++ file \ ++ curl \ ++ ca-certificates \ ++ python \ ++ unzip \ ++ expect \ ++ openjdk-9-jre \ ++ libstdc++6:i386 \ ++ libpulse0 \ ++ gcc \ ++ libc6-dev ++ ++WORKDIR /android/ ++COPY android* /android/ ++ ++ENV ANDROID_ARCH=arm ++ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools ++ ++RUN sh /android/android-install-ndk.sh $ANDROID_ARCH ++RUN sh /android/android-install-sdk.sh $ANDROID_ARCH ++RUN mv /root/.android /tmp ++RUN chmod 777 -R /tmp/.android ++RUN chmod 755 /android/sdk/tools/* /android/sdk/emulator/qemu/linux-x86_64/* ++ ++ENV PATH=$PATH:/rust/bin \ ++ CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ ++ CARGO_TARGET_ARM_LINUX_ANDROIDEABI_RUNNER=/tmp/runtest \ ++ HOME=/tmp ++ ++ADD runtest-android.rs /tmp/runtest.rs ++ENTRYPOINT [ \ ++ "bash", \ ++ "-c", \ ++ # set SHELL so android can detect a 64bits system, see ++ # http://stackoverflow.com/a/41789144 ++ "SHELL=/bin/dash /android/sdk/emulator/emulator @arm -no-window & \ ++ rustc /tmp/runtest.rs -o /tmp/runtest && \ ++ exec \"$@\"", \ ++ "--" \ ++] diff --cc vendor/libc-0.2.43/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile index 000000000,000000000..9fe71dcf8 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile @@@ -1,0 -1,0 +1,7 @@@ ++FROM ubuntu:17.10 ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev ca-certificates \ ++ gcc-arm-linux-gnueabihf libc6-dev-armhf-cross qemu-user ++ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ ++ CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_RUNNER="qemu-arm -L /usr/arm-linux-gnueabihf" \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/arm-unknown-linux-musleabihf/Dockerfile index 000000000,000000000..86304130f new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/arm-unknown-linux-musleabihf/Dockerfile @@@ -1,0 -1,0 +1,25 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc make libc6-dev git curl ca-certificates \ ++ gcc-arm-linux-gnueabihf qemu-user ++ ++RUN curl https://www.musl-libc.org/releases/musl-1.1.19.tar.gz | tar xzf - ++WORKDIR /musl-1.1.19 ++RUN CC=arm-linux-gnueabihf-gcc \ ++ CFLAGS="-march=armv6 -marm" \ ++ ./configure --prefix=/musl-arm --enable-wrapper=yes ++RUN make install -j4 ++ ++# Install linux kernel headers sanitized for use with musl ++RUN curl -L 
https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-6.tar.gz | \ ++ tar xzf - && \ ++ cd kernel-headers-3.12.6-6 && \ ++ make ARCH=arm prefix=/musl-arm install -j4 && \ ++ cd .. && \ ++ rm -rf kernel-headers-3.12.6-6 ++ ++ENV PATH=$PATH:/musl-arm/bin:/rust/bin \ ++ CC_arm_unknown_linux_musleabihf=musl-gcc \ ++ CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABIHF_LINKER=musl-gcc \ ++ CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABIHF_RUNNER="qemu-arm -L /musl-arm" diff --cc vendor/libc-0.2.43/ci/docker/asmjs-unknown-emscripten/Dockerfile index 000000000,000000000..3088fc53c new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/asmjs-unknown-emscripten/Dockerfile @@@ -1,0 -1,0 +1,20 @@@ ++FROM ubuntu:16.04 ++ ++RUN apt-get update && \ ++ apt-get install -y --no-install-recommends \ ++ ca-certificates \ ++ curl \ ++ gcc \ ++ git \ ++ libc6-dev \ ++ python \ ++ xz-utils ++ ++COPY emscripten.sh / ++RUN bash /emscripten.sh ++ ++ENV PATH=$PATH:/rust/bin \ ++ CARGO_TARGET_ASMJS_UNKNOWN_EMSCRIPTEN_RUNNER=node ++ ++COPY emscripten-entry.sh / ++ENTRYPOINT ["/emscripten-entry.sh"] diff --cc vendor/libc-0.2.43/ci/docker/i686-linux-android/Dockerfile index 000000000,000000000..f0836c385 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/i686-linux-android/Dockerfile @@@ -1,0 -1,0 +1,45 @@@ ++FROM ubuntu:16.04 ++ ++RUN dpkg --add-architecture i386 && \ ++ apt-get update && \ ++ apt-get install -y --no-install-recommends \ ++ file \ ++ curl \ ++ ca-certificates \ ++ python \ ++ unzip \ ++ expect \ ++ openjdk-9-jre \ ++ libstdc++6:i386 \ ++ libpulse0 \ ++ gcc \ ++ libc6-dev ++ ++WORKDIR /android/ ++COPY android* /android/ ++ ++ENV ANDROID_ARCH=i686 ++ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools ++ ++RUN sh /android/android-install-ndk.sh $ANDROID_ARCH ++RUN sh /android/android-install-sdk.sh $ANDROID_ARCH ++RUN mv /root/.android /tmp ++RUN chmod 777 -R /tmp/.android ++RUN chmod 755 /android/sdk/tools/* /android/sdk/emulator/qemu/linux-x86_64/* ++ ++ENV PATH=$PATH:/rust/bin \ ++ CARGO_TARGET_I686_LINUX_ANDROID_LINKER=i686-linux-android-gcc \ ++ CARGO_TARGET_I686_LINUX_ANDROID_RUNNER=/tmp/runtest \ ++ HOME=/tmp ++ ++ADD runtest-android.rs /tmp/runtest.rs ++ENTRYPOINT [ \ ++ "bash", \ ++ "-c", \ ++ # set SHELL so android can detect a 64bits system, see ++ # http://stackoverflow.com/a/41789144 ++ "SHELL=/bin/dash /android/sdk/emulator/emulator @i686 -no-window -no-accel & \ ++ rustc /tmp/runtest.rs -o /tmp/runtest && \ ++ exec \"$@\"", \ ++ "--" \ ++] diff --cc vendor/libc-0.2.43/ci/docker/i686-unknown-linux-gnu/Dockerfile index 000000000,000000000..03f3e8e69 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/i686-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,5 @@@ ++FROM ubuntu:18.04 ++RUN apt-get update ++RUN apt-get install -y --no-install-recommends \ ++ gcc-multilib libc6-dev ca-certificates ++ENV PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/i686-unknown-linux-musl/Dockerfile index 000000000,000000000..49f37d70f new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/i686-unknown-linux-musl/Dockerfile @@@ -1,0 -1,0 +1,31 @@@ ++FROM ubuntu:17.10 ++ ++RUN dpkg --add-architecture i386 ++RUN apt-get update ++RUN apt-get install -y --no-install-recommends \ ++ gcc-multilib make libc6-dev git curl ca-certificates libc6:i386 ++# Below we're cross-compiling musl for i686 using the system compiler on an ++# x86_64 system. 
This is an awkward thing to be doing and so we have to jump ++# through a couple hoops to get musl to be happy. In particular: ++# ++# * We specifically pass -m32 in CFLAGS and override CC when running ./configure, ++# since otherwise the script will fail to find a compiler. ++# * We manually unset CROSS_COMPILE when running make; otherwise the makefile ++# will call the non-existent binary 'i686-ar'. ++RUN curl https://www.musl-libc.org/releases/musl-1.1.19.tar.gz | \ ++ tar xzf - && \ ++ cd musl-1.1.19 && \ ++ CC=gcc CFLAGS=-m32 ./configure --prefix=/musl-i686 --disable-shared --target=i686 && \ ++ make CROSS_COMPILE= install -j4 && \ ++ cd .. && \ ++ rm -rf musl-1.1.19 ++# Install linux kernel headers sanitized for use with musl ++RUN curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-6.tar.gz | \ ++ tar xzf - && \ ++ cd kernel-headers-3.12.6-6 && \ ++ make ARCH=i386 prefix=/musl-i686 install -j4 && \ ++ cd .. && \ ++ rm -rf kernel-headers-3.12.6-6 ++ ++ENV PATH=$PATH:/musl-i686/bin:/rust/bin \ ++ CC_i686_unknown_linux_musl=musl-gcc diff --cc vendor/libc-0.2.43/ci/docker/mips-unknown-linux-gnu/Dockerfile index 000000000,000000000..c66abd471 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/mips-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,10 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev qemu-user ca-certificates \ ++ gcc-mips-linux-gnu libc6-dev-mips-cross \ ++ qemu-system-mips ++ ++ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER=mips-linux-gnu-gcc \ ++ CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_RUNNER="qemu-mips -L /usr/mips-linux-gnu" \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/mips-unknown-linux-musl/Dockerfile index 000000000,000000000..91ffd5817 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/mips-unknown-linux-musl/Dockerfile @@@ -1,0 -1,0 +1,17 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ ++ bzip2 ++ ++RUN mkdir /toolchain ++ ++# Note that this originally came from: ++# https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 ++RUN curl -L https://s3-us-west-1.amazonaws.com/rust-lang-ci2/libc/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ ++ tar xjf - -C /toolchain --strip-components=1 ++ ++ENV PATH=$PATH:/rust/bin:/toolchain/staging_dir/toolchain-mips_34kc_gcc-5.3.0_musl-1.1.15/bin \ ++ CC_mips_unknown_linux_musl=mips-openwrt-linux-gcc \ ++ CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_LINKER=mips-openwrt-linux-gcc \ ++ CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_RUNNER="qemu-mips -L /toolchain/staging_dir/toolchain-mips_34kc_gcc-5.3.0_musl-1.1.15" diff --cc vendor/libc-0.2.43/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile index 000000000,000000000..b9921fcc5 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile @@@ -1,0 -1,0 +1,11 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev qemu-user ca-certificates \ ++ gcc-mips64-linux-gnuabi64 libc6-dev-mips64-cross \ ++ qemu-system-mips64 ++ ++ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_LINKER=mips64-linux-gnuabi64-gcc \ ++ CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_RUNNER="qemu-mips64 -L /usr/mips64-linux-gnuabi64" \ ++ 
CC_mips64_unknown_linux_gnuabi64=mips64-linux-gnuabi64-gcc \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile index 000000000,000000000..434c90819 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile @@@ -1,0 -1,0 +1,11 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev qemu-user ca-certificates \ ++ gcc-mips64el-linux-gnuabi64 libc6-dev-mips64el-cross \ ++ qemu-system-mips64el ++ ++ENV CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_GNUABI64_LINKER=mips64el-linux-gnuabi64-gcc \ ++ CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_GNUABI64_RUNNER="qemu-mips64el -L /usr/mips64el-linux-gnuabi64" \ ++ CC_mips64el_unknown_linux_gnuabi64=mips64el-linux-gnuabi64-gcc \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/mipsel-unknown-linux-musl/Dockerfile index 000000000,000000000..3642fa8ca new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/mipsel-unknown-linux-musl/Dockerfile @@@ -1,0 -1,0 +1,17 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ ++ bzip2 ++ ++RUN mkdir /toolchain ++ ++# Note that this originally came from: ++# https://downloads.openwrt.org/snapshots/trunk/malta/generic/OpenWrt-Toolchain-malta-le_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 ++RUN curl -L https://s3-us-west-1.amazonaws.com/rust-lang-ci2/libc/OpenWrt-Toolchain-malta-le_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ ++ tar xjf - -C /toolchain --strip-components=2 ++ ++ENV PATH=$PATH:/rust/bin:/toolchain/bin \ ++ CC_mipsel_unknown_linux_musl=mipsel-openwrt-linux-gcc \ ++ CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_LINKER=mipsel-openwrt-linux-gcc \ ++ CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_RUNNER="qemu-mipsel -L /toolchain" diff --cc vendor/libc-0.2.43/ci/docker/powerpc-unknown-linux-gnu/Dockerfile index 000000000,000000000..106ada444 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/powerpc-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,10 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev qemu-user ca-certificates \ ++ gcc-powerpc-linux-gnu libc6-dev-powerpc-cross \ ++ qemu-system-ppc ++ ++ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER=powerpc-linux-gnu-gcc \ ++ CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_RUNNER="qemu-ppc -L /usr/powerpc-linux-gnu" \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile index 000000000,000000000..a6ab66a9a new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,11 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ gcc libc6-dev qemu-user ca-certificates \ ++ gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross \ ++ qemu-system-ppc ++ ++ENV CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_LINKER=powerpc64-linux-gnu-gcc \ ++ CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_RUNNER="qemu-ppc64 -L /usr/powerpc64-linux-gnu" \ ++ CC=powerpc64-linux-gnu-gcc \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile index 000000000,000000000..627123e9a new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,11 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get 
install -y --no-install-recommends \ ++ gcc libc6-dev qemu-user ca-certificates \ ++ gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross \ ++ qemu-system-ppc ++ ++ENV CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_LINKER=powerpc64le-linux-gnu-gcc \ ++ CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_RUNNER="qemu-ppc64le -L /usr/powerpc64le-linux-gnu" \ ++ CC=powerpc64le-linux-gnu-gcc \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/s390x-unknown-linux-gnu/Dockerfile index 000000000,000000000..861f4f9b0 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/s390x-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,18 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ curl ca-certificates \ ++ gcc libc6-dev \ ++ gcc-s390x-linux-gnu libc6-dev-s390x-cross \ ++ qemu-system-s390x \ ++ cpio ++ ++COPY linux-s390x.sh / ++RUN bash /linux-s390x.sh ++ ++COPY test-runner-linux / ++ ++ENV CARGO_TARGET_S390X_UNKNOWN_LINUX_GNU_LINKER=s390x-linux-gnu-gcc \ ++ CARGO_TARGET_S390X_UNKNOWN_LINUX_GNU_RUNNER="/test-runner-linux s390x" \ ++ CC_s390x_unknown_linux_gnu=s390x-linux-gnu-gcc \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/sparc64-unknown-linux-gnu/Dockerfile index 000000000,000000000..d9edaab42 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/sparc64-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,21 @@@ ++FROM debian:stretch ++ ++RUN apt-get update && apt-get install -y --no-install-recommends \ ++ curl ca-certificates \ ++ gcc libc6-dev \ ++ gcc-sparc64-linux-gnu libc6-dev-sparc64-cross \ ++ qemu-system-sparc64 openbios-sparc seabios ipxe-qemu \ ++ p7zip-full cpio linux-libc-dev-sparc64-cross linux-headers-4.9.0-3-common ++ ++# Put linux/module.h into the right spot as it is not shipped by debian ++RUN cp /usr/src/linux-headers-4.9.0-3-common/include/uapi/linux/module.h /usr/sparc64-linux-gnu/include/linux/ ++ ++COPY linux-sparc64.sh / ++RUN bash /linux-sparc64.sh ++ ++COPY test-runner-linux / ++ ++ENV CARGO_TARGET_SPARC64_UNKNOWN_LINUX_GNU_LINKER=sparc64-linux-gnu-gcc \ ++ CARGO_TARGET_SPARC64_UNKNOWN_LINUX_GNU_RUNNER="/test-runner-linux sparc64" \ ++ CC_sparc64_unknown_linux_gnu=sparc64-linux-gnu-gcc \ ++ PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/wasm32-unknown-emscripten/Dockerfile index 000000000,000000000..59bf7d9a2 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/wasm32-unknown-emscripten/Dockerfile @@@ -1,0 -1,0 +1,21 @@@ ++FROM ubuntu:16.04 ++ ++RUN apt-get update && \ ++ apt-get install -y --no-install-recommends \ ++ ca-certificates \ ++ curl \ ++ gcc \ ++ git \ ++ libc6-dev \ ++ python \ ++ xz-utils ++ ++COPY emscripten.sh / ++RUN bash /emscripten.sh ++ ++ENV PATH=$PATH:/rust/bin \ ++ CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_RUNNER=node-wrapper.sh ++ ++COPY emscripten-entry.sh / ++COPY docker/wasm32-unknown-emscripten/node-wrapper.sh /usr/local/bin/node-wrapper.sh ++ENTRYPOINT ["/emscripten-entry.sh"] diff --cc vendor/libc-0.2.43/ci/docker/wasm32-unknown-emscripten/node-wrapper.sh index 000000000,000000000..3122e2e23 new file mode 100755 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/wasm32-unknown-emscripten/node-wrapper.sh @@@ -1,0 -1,0 +1,15 @@@ ++#!/bin/sh ++ ++set -e ++ ++me=$1 ++shift ++dir=$(dirname $me) ++file=$(basename $me) ++ ++if echo $file | grep -q wasm; then ++ exit 0 # FIXME(rust-lang/cargo#4750) ++fi ++ ++cd $dir ++exec node $file "$@" diff --cc vendor/libc-0.2.43/ci/docker/x86_64-linux-android/Dockerfile index 
000000000,000000000..0cfbc4820
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/docker/x86_64-linux-android/Dockerfile
@@@ -1,0 -1,0 +1,26 @@@
++FROM ubuntu:16.04
++
++RUN apt-get update && \
++    apt-get install -y --no-install-recommends \
++            ca-certificates \
++            curl \
++            gcc \
++            libc-dev \
++            python \
++            unzip
++
++WORKDIR /android/
++ENV ANDROID_ARCH=x86_64
++COPY android-install-ndk.sh /android/
++RUN sh /android/android-install-ndk.sh $ANDROID_ARCH
++
++# We do not run x86_64-linux-android tests on an android emulator.
++# See ci/android-sysimage.sh for information about how tests are run.
++COPY android-sysimage.sh /android/
++RUN bash /android/android-sysimage.sh x86_64 x86_64-24_r07.zip
++
++ENV PATH=$PATH:/rust/bin:/android/ndk-$ANDROID_ARCH/bin \
++    CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER=x86_64-linux-android-gcc \
++    CC_x86_64_linux_android=x86_64-linux-android-gcc \
++    CXX_x86_64_linux_android=x86_64-linux-android-g++ \
++    HOME=/tmp
diff --cc vendor/libc-0.2.43/ci/docker/x86_64-rumprun-netbsd/Dockerfile
index 000000000,000000000..a486d05b2
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/docker/x86_64-rumprun-netbsd/Dockerfile
@@@ -1,0 -1,0 +1,10 @@@
++FROM mato/rumprun-toolchain-hw-x86_64
++USER root
++RUN apt-get update
++RUN apt-get install -y --no-install-recommends \
++    qemu
++ENV PATH=$PATH:/rust/bin \
++    CARGO_TARGET_X86_64_RUMPRUN_NETBSD_RUNNER=/tmp/runtest
++
++ADD docker/x86_64-rumprun-netbsd/runtest.rs /tmp/
++ENTRYPOINT ["sh", "-c", "rustc /tmp/runtest.rs -o /tmp/runtest && exec \"$@\"", "--"]
diff --cc vendor/libc-0.2.43/ci/docker/x86_64-rumprun-netbsd/runtest.rs
index 000000000,000000000..94b594608
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/docker/x86_64-rumprun-netbsd/runtest.rs
@@@ -1,0 -1,0 +1,54 @@@
++use std::env;
++use std::process::{Command, Stdio};
++use std::sync::mpsc;
++use std::thread;
++use std::time::Duration;
++use std::io::{BufRead, BufReader, Read};
++
++fn main() {
++    assert_eq!(env::args().len(), 2);
++
++    let status = Command::new("rumprun-bake")
++        .arg("hw_virtio")
++        .arg("/tmp/libc-test.img")
++        .arg(env::args().nth(1).unwrap())
++        .status()
++        .expect("failed to run rumprun-bake");
++    assert!(status.success());
++
++    let mut child = Command::new("qemu-system-x86_64")
++        .arg("-nographic")
++        .arg("-vga").arg("none")
++        .arg("-m").arg("64")
++        .arg("-kernel").arg("/tmp/libc-test.img")
++        .stdout(Stdio::piped())
++        .stderr(Stdio::piped())
++        .spawn()
++        .expect("failed to spawn qemu");
++
++    let mut stdout = child.stdout.take().unwrap();
++    let mut stderr = child.stderr.take().unwrap();
++    let (tx, rx) = mpsc::channel();
++    let tx2 = tx.clone();
++    let t1 = thread::spawn(move || find_ok(&mut stdout, tx));
++    let t2 = thread::spawn(move || find_ok(&mut stderr, tx2));
++
++    let res = rx.recv_timeout(Duration::new(5, 0));
++    child.kill().unwrap();
++    t1.join().unwrap();
++    t2.join().unwrap();
++
++    if res.is_err() {
++        panic!("didn't find success");
++    }
++}
++
++fn find_ok(input: &mut Read, tx: mpsc::Sender<()>) {
++    for line in BufReader::new(input).lines() {
++        let line = line.unwrap();
++        println!("{}", line);
++        if line.starts_with("PASSED ") && line.contains(" tests") {
++            tx.send(()).unwrap();
++        }
++    }
++}
diff --cc vendor/libc-0.2.43/ci/docker/x86_64-unknown-freebsd/Dockerfile
index 000000000,000000000..35f103657
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/docker/x86_64-unknown-freebsd/Dockerfile
@@@ -1,0 -1,0 +1,13 @@@
++FROM 
wezm/port-prebuilt-freebsd11@sha256:43553e2265ec702ec72a63a765df333f50b1858b896e69385749e96d8624e9b0 ++ ++RUN apt-get update ++RUN apt-get install -y --no-install-recommends \ ++ qemu genext2fs xz-utils ++RUN apt-get install -y curl ca-certificates gcc ++ ++ENTRYPOINT ["sh"] ++ ++ENV PATH=$PATH:/rust/bin \ ++ QEMU=2018-03-15/FreeBSD-11.1-RELEASE-amd64.qcow2.xz \ ++ CAN_CROSS=1 \ ++ CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER=x86_64-unknown-freebsd11-gcc diff --cc vendor/libc-0.2.43/ci/docker/x86_64-unknown-linux-gnu/Dockerfile index 000000000,000000000..6ab9c9231 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/x86_64-unknown-linux-gnu/Dockerfile @@@ -1,0 -1,0 +1,5 @@@ ++FROM ubuntu:18.04 ++RUN apt-get update ++RUN apt-get install -y --no-install-recommends \ ++ gcc libc6-dev ca-certificates ++ENV PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/x86_64-unknown-linux-gnux32/Dockerfile index 000000000,000000000..03f3e8e69 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/x86_64-unknown-linux-gnux32/Dockerfile @@@ -1,0 -1,0 +1,5 @@@ ++FROM ubuntu:18.04 ++RUN apt-get update ++RUN apt-get install -y --no-install-recommends \ ++ gcc-multilib libc6-dev ca-certificates ++ENV PATH=$PATH:/rust/bin diff --cc vendor/libc-0.2.43/ci/docker/x86_64-unknown-linux-musl/Dockerfile index 000000000,000000000..6e2b7d9e5 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/docker/x86_64-unknown-linux-musl/Dockerfile @@@ -1,0 -1,0 +1,20 @@@ ++FROM ubuntu:17.10 ++ ++RUN apt-get update ++RUN apt-get install -y --no-install-recommends \ ++ gcc make libc6-dev git curl ca-certificates ++RUN curl https://www.musl-libc.org/releases/musl-1.1.19.tar.gz | \ ++ tar xzf - && \ ++ cd musl-1.1.19 && \ ++ ./configure --prefix=/musl-x86_64 && \ ++ make install -j4 && \ ++ cd .. && \ ++ rm -rf musl-1.1.19 ++# Install linux kernel headers sanitized for use with musl ++RUN curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-6.tar.gz | \ ++ tar xzf - && \ ++ cd kernel-headers-3.12.6-6 && \ ++ make ARCH=x86_64 prefix=/musl-x86_64 install -j4 && \ ++ cd .. && \ ++ rm -rf kernel-headers-3.12.6-6 ++ENV PATH=$PATH:/musl-x86_64/bin:/rust/bin diff --cc vendor/libc-0.2.43/ci/dox.sh index 000000000,000000000..b8ffa7dd0 new file mode 100644 --- /dev/null +++ b/vendor/libc-0.2.43/ci/dox.sh @@@ -1,0 -1,0 +1,33 @@@ ++#!/bin/sh ++ ++# Builds documentation for all target triples that we have a registered URL for ++# in liblibc. This scrapes the list of triples to document from `src/lib.rs` ++# which has a bunch of `html_root_url` directives we pick up. ++ ++set -e ++ ++TARGETS=`grep html_root_url src/lib.rs | sed 's/.*".*\/\(.*\)"/\1/'` ++ ++rm -rf target/doc ++mkdir -p target/doc ++ ++cp ci/landing-page-head.html target/doc/index.html ++ ++for target in $TARGETS; do ++ echo documenting $target ++ ++ rustdoc -o target/doc/$target --target $target src/lib.rs --cfg cross_platform_docs \ ++ --crate-name libc ++ ++ echo "
++<li><a href=\"$target/libc/index.html\">$target</a></li>" \
++    >> target/doc/index.html
++done
++
++cat ci/landing-page-footer.html >> target/doc/index.html
++
++# If we're on travis, not a PR, and on the right branch, publish!
++if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ "$TRAVIS_BRANCH" = "master" ]; then
++  pip install ghp_import --install-option="--prefix=$HOME/.local"
++  $HOME/.local/bin/ghp-import -n target/doc
++  git push -qf https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages
++fi
diff --cc vendor/libc-0.2.43/ci/emscripten-entry.sh
index 000000000,000000000..22ae8b08a
new file mode 100755
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/emscripten-entry.sh
@@@ -1,0 -1,0 +1,19 @@@
++#!/usr/bin/env bash
++# Copyright 2017 The Rust Project Developers. See the COPYRIGHT
++# file at the top-level directory of this distribution and at
++# http://rust-lang.org/COPYRIGHT.
++#
++# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++# option. This file may not be copied, modified, or distributed
++# except according to those terms.
++
++set -ex
++
++source /emsdk-portable/emsdk_env.sh &> /dev/null
++
++# emsdk-portable provides a node binary, but we need version 8 to run wasm
++export PATH="/node-v8.0.0-linux-x64/bin:$PATH"
++
++exec "$@"
diff --cc vendor/libc-0.2.43/ci/emscripten.sh
index 000000000,000000000..d80258584
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/emscripten.sh
@@@ -1,0 -1,0 +1,54 @@@
++# Copyright 2017 The Rust Project Developers. See the COPYRIGHT
++# file at the top-level directory of this distribution and at
++# http://rust-lang.org/COPYRIGHT.
++#
++# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++# option. This file may not be copied, modified, or distributed
++# except according to those terms.
++
++set -ex
++
++hide_output() {
++  set +x
++  on_err="
++echo ERROR: An error was encountered with the build.
++cat /tmp/build.log
++exit 1
++"
++  trap "$on_err" ERR
++  bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
++  PING_LOOP_PID=$!
++  $@ &> /tmp/build.log
++  trap - ERR
++  kill $PING_LOOP_PID
++  rm -f /tmp/build.log
++  set -x
++}
++
++cd /
++curl -L https://s3.amazonaws.com/mozilla-games/emscripten/releases/emsdk-portable.tar.gz | \
++    tar -xz
++
++cd /emsdk-portable
++./emsdk update
++hide_output ./emsdk install sdk-1.37.20-64bit
++./emsdk activate sdk-1.37.20-64bit
++
++# Compile and cache libc
++source ./emsdk_env.sh
++echo "main(){}" > a.c
++HOME=/emsdk-portable/ emcc a.c
++HOME=/emsdk-portable/ emcc -s BINARYEN=1 a.c
++rm -f a.*
++
++# Make emsdk usable by any user
++cp /root/.emscripten /emsdk-portable
++chmod a+rxw -R /emsdk-portable
++
++# node 8 is required to run wasm
++cd /
++curl -L https://nodejs.org/dist/v8.0.0/node-v8.0.0-linux-x64.tar.xz | \
++    tar -xJ
++
diff --cc vendor/libc-0.2.43/ci/ios/deploy_and_run_on_ios_simulator.rs
index 000000000,000000000..95df52d76
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/ios/deploy_and_run_on_ios_simulator.rs
@@@ -1,0 -1,0 +1,172 @@@
++// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution and at
++// http://rust-lang.org/COPYRIGHT.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++// This is a script to deploy and execute a binary on an iOS simulator.
++// The primary use of this is to be able to run unit tests on the simulator and
++// retrieve the results.
++//
++// To do this through Cargo instead, use Dinghy
++// (https://github.com/snipsco/dinghy): cargo dinghy install, then cargo dinghy
++// test.
++
++use std::env;
++use std::fs::{self, File};
++use std::io::Write;
++use std::path::Path;
++use std::process;
++use std::process::Command;
++
++macro_rules! t {
++    ($e:expr) => (match $e {
++        Ok(e) => e,
++        Err(e) => panic!("{} failed with: {}", stringify!($e), e),
++    })
++}
++
++// Step one: Wrap as an app
++fn package_as_simulator_app(crate_name: &str, test_binary_path: &Path) {
++    println!("Packaging simulator app");
++    drop(fs::remove_dir_all("ios_simulator_app"));
++    t!(fs::create_dir("ios_simulator_app"));
++    t!(fs::copy(test_binary_path,
++                Path::new("ios_simulator_app").join(crate_name)));
++
++    let mut f = t!(File::create("ios_simulator_app/Info.plist"));
++    t!(f.write_all(format!(r#"
++        <?xml version="1.0" encoding="UTF-8"?>
++        <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
++                "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
++        <plist version="1.0">
++            <dict>
++                <key>CFBundleExecutable</key>
++                <string>{}</string>
++                <key>CFBundleIdentifier</key>
++                <string>com.rust.unittests</string>
++            </dict>
++        </plist>
++        "#, crate_name).as_bytes()));
++}
++
++// Step two: Start the iOS simulator
++fn start_simulator() {
++    println!("Looking for iOS simulator");
++    let output = t!(Command::new("xcrun").arg("simctl").arg("list").output());
++    assert!(output.status.success());
++    let mut simulator_exists = false;
++    let mut simulator_booted = false;
++    let mut found_rust_sim = false;
++    let stdout = t!(String::from_utf8(output.stdout));
++    for line in stdout.lines() {
++        if line.contains("rust_ios") {
++            if found_rust_sim {
++                panic!("Duplicate rust_ios simulators found. Please \
++                        double-check xcrun simctl list.");
++            }
++            simulator_exists = true;
++            simulator_booted = line.contains("(Booted)");
++            found_rust_sim = true;
++        }
++    }
++
++    if simulator_exists == false {
++        println!("Creating iOS simulator");
++        Command::new("xcrun")
++            .arg("simctl")
++            .arg("create")
++            .arg("rust_ios")
++            .arg("com.apple.CoreSimulator.SimDeviceType.iPhone-SE")
++            .arg("com.apple.CoreSimulator.SimRuntime.iOS-10-2")
++            .check_status();
++    } else if simulator_booted == true {
++        println!("Shutting down already-booted simulator");
++        Command::new("xcrun")
++            .arg("simctl")
++            .arg("shutdown")
++            .arg("rust_ios")
++            .check_status();
++    }
++
++    println!("Starting iOS simulator");
++    // We can't uninstall the app (if present) as that will hang if the
++    // simulator isn't completely booted; just erase the simulator instead.
++    Command::new("xcrun").arg("simctl").arg("erase").arg("rust_ios").check_status();
++    Command::new("xcrun").arg("simctl").arg("boot").arg("rust_ios").check_status();
++}
++
++// Step three: Install the app
++fn install_app_to_simulator() {
++    println!("Installing app to simulator");
++    Command::new("xcrun")
++        .arg("simctl")
++        .arg("install")
++        .arg("booted")
++        .arg("ios_simulator_app/")
++        .check_status();
++}
++
++// Step four: Run the app
++fn run_app_on_simulator() {
++    println!("Running app");
++    let output = t!(Command::new("xcrun")
++                    .arg("simctl")
++                    .arg("launch")
++                    .arg("--console")
++                    .arg("booted")
++                    .arg("com.rust.unittests")
++                    .output());
++
++    println!("status: {}", output.status);
++    println!("stdout --\n{}\n", String::from_utf8_lossy(&output.stdout));
++    println!("stderr --\n{}\n", String::from_utf8_lossy(&output.stderr));
++
++    let stdout = String::from_utf8_lossy(&output.stdout);
++    let passed = stdout.lines()
++        .find(|l| l.contains("PASSED"))
++        .map(|l| l.contains("tests"))
++        .unwrap_or(false);
++
++    println!("Shutting down simulator");
++    Command::new("xcrun")
++        .arg("simctl")
++        .arg("shutdown")
++        .arg("rust_ios")
++        .check_status();
++    if !passed {
++        panic!("tests didn't pass");
++    }
++}
++
++trait CheckStatus {
++    fn check_status(&mut self);
++}
++
++impl CheckStatus for Command {
++    fn check_status(&mut self) {
++        println!("\trunning: {:?}", self);
++        assert!(t!(self.status()).success());
++    }
++}
++
++fn main() {
++    let args: Vec<String> = env::args().collect();
++    if args.len() != 2 {
++        println!("Usage: {} <test-binary-path>", args[0]);
++        process::exit(-1);
++    }
++
++    let test_binary_path = Path::new(&args[1]);
++    let crate_name = test_binary_path.file_name().unwrap();
++
++    package_as_simulator_app(crate_name.to_str().unwrap(), test_binary_path);
++    start_simulator();
++    install_app_to_simulator();
++    run_app_on_simulator();
++}
diff --cc vendor/libc-0.2.43/ci/landing-page-footer.html
index 000000000,000000000..941cc8d2b
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/landing-page-footer.html
@@@ -1,0 -1,0 +1,3 @@@
++</ul>
++</body>
++</html>
diff --cc vendor/libc-0.2.43/ci/landing-page-head.html
index 000000000,000000000..fc69fa88e
new file mode 100644
--- /dev/null
+++ b/vendor/libc-0.2.43/ci/landing-page-head.html
@@@ -1,0 -1,0 +1,7 @@@
++
++
++
++
++
++
++